diff --git a/.env.example b/.env.example
new file mode 100644
index 000000000..2ce61dfb0
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,14 @@
+# after generating keys, you can set them as environment variables
+JWT_PRIVATE_KEY="-----BEGIN PRIVATE KEY-----
+<<$(cat ./keys/private.pem)>>
+-----END PRIVATE KEY-----"
+JWT_PUBLIC_KEY="-----BEGIN PUBLIC KEY-----
+<<$(cat ./keys/public.pem)>>
+-----END PUBLIC KEY-----"
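+# a hypothetical way to generate the key pair referenced above (assumes openssl is available):
+#   mkdir -p keys
+#   openssl genpkey -algorithm RSA -out keys/private.pem -pkeyopt rsa_keygen_bits:2048
+#   openssl rsa -in keys/private.pem -pubout -out keys/public.pem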
+CGO_ENABLED=0
+LOGS_DATABASE="postgresql://user:pass@localhost/logs?sslmode=disable"
+DATABASE="postgresql://user:pass@localhost/postgres?sslmode=disable"
+
+
+# optional ENV_VARS
+BCRYPT_COST=10 # default is 12
\ No newline at end of file
diff --git a/.github/FUNDING.yaml b/.github/FUNDING.yaml
deleted file mode 100644
index 5de7fa799..000000000
--- a/.github/FUNDING.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# These are supported funding model platforms
-
-github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
-patreon: # Replace with a single Patreon username
-open_collective: pocketbase
-ko_fi: # Replace with a single Ko-fi username
-tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
-liberapay: # Replace with a single Liberapay username
-issuehunt: # Replace with a single IssueHunt username
-otechie: # Replace with a single Otechie username
-lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
-custom: ['https://www.paypal.com/donate/?hosted_button_id=4DVXNL4B8WT98']
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index efeacfddc..7581d99b7 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -9,19 +9,19 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js
- uses: actions/setup-node@v3
+ uses: actions/setup-node@v4
with:
- node-version: 20.8.1
+ node-version: 20.11.0
- name: Set up Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: '>=1.21.3'
+ go-version: '>=1.22.5'
# This step usually is not needed because the /ui/dist is pregenerated locally
# but its here to ensure that each release embeds the latest admin ui artifacts.
diff --git a/.gitignore b/.gitignore
index 4b4de1376..d157bfa72 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-/.vscode/
+/.vscode/*
.idea
.DS_Store
@@ -23,4 +23,6 @@ pb_migrations/*
pb_data/*
server
*.exe
-keys/*
\ No newline at end of file
+keys/*
+.env
+!.vscode/launch.json
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 000000000..ba873e7a2
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,16 @@
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Debug",
+ "type": "go",
+ "request": "launch",
+ "mode": "debug",
+ "program": "${workspaceRoot}/examples/base",
+ "buildFlags": "-tags=pq",
+ "args": ["serve", "--dev"],
+ "envFile": "${workspaceRoot}/.env",
+ "debugAdapter": "dlv-dap"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8dc29cdcf..0d23d3732 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,299 @@
+## v0.22.19
+
+- Added additional parsing for the Apple OAuth2 `user` token response field to attempt returning the name of the authenticated user ([#5074](https://github.com/pocketbase/pocketbase/discussions/5074#discussioncomment-10317207)).
+ _Note that Apple only returns the user object the first time the user authorizes the app (at least based on [their docs](https://developer.apple.com/documentation/sign_in_with_apple/sign_in_with_apple_js/configuring_your_webpage_for_sign_in_with_apple#3331292))._
+
+
+## v0.22.18
+
+- Improved files delete performance when using the local filesystem by adding a trailing slash to the `DeletePrefix` call to ensure that the list iterator will start "walking" from the prefix directory and not from its parent ([#5246](https://github.com/pocketbase/pocketbase/discussions/5246)).
+
+- Updated Go deps.
+
+
+## v0.22.17
+
+- Updated the `editor` field to use the latest TinyMCE 6.8.4 and enabled `convert_unsafe_embeds:true` by default per the security advisories.
+ _The Admin UI shouldn't be affected by the older TinyMCE because we don't directly use the vulnerable options/plugins and we have a default CSP, but it is recommended to update even if just to silence the CI/CD warnings._
+
+- Disabled mouse selection when changing the sidebar width.
+ _This should also fix the reported Firefox issue where the sidebar width "resets" on mouse release outside of the page window._
+
+- Other minor improvements (updated the logs delete check and tests, normalized internal errors formatting, updated Go deps, etc.)
+
+
+## v0.22.16
+
+- Fixed the days calculation for triggering old logs deletion ([#5179](https://github.com/pocketbase/pocketbase/pull/5179); thanks @nehmeroumani).
+ _Note that the previous versions correctly delete only the logs older than the configured setting, but due to the typo the delete query was invoked unnecessarily on each logs batch write._
+
+
+## v0.22.15
+
+- Added mutex to `tests.TestMailer()` to minimize tests data race warnings ([#5157](https://github.com/pocketbase/pocketbase/issues/5157)).
+
+- Updated goja and the other Go dependencies.
+
+- Bumped the min Go version in the GitHub release action to Go 1.22.5 since it comes with [`net/http` security fixes](https://github.com/golang/go/issues?q=milestone%3AGo1.22.5).
+
+
+## v0.22.14
+
+- Added OAuth2 POST redirect support (in case of `response_mode=form_post`) to allow specifying scopes for the Apple OAuth2 integration.
+
+ Note 1: If you are using the "Manual code exchange" flow with Apple (aka. `authWithOAuth2Code()`), you need to either update your custom
+ redirect handler to accept POST requests OR, if you want to keep the old behavior and don't need the Apple user's email, replace `response_mode=form_post` in the Apple authorization url back to `response_mode=query`.
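+
+ A minimal sketch, assuming the custom redirect handler is registered as a PocketBase route via `OnBeforeServe` (the route path and handler name are illustrative):
+ ```go
+ app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
+     e.Router.GET("/my-oauth2-redirect", myRedirectHandler)
+     // also accept the POST redirect sent when response_mode=form_post
+     e.Router.POST("/my-oauth2-redirect", myRedirectHandler)
+     return nil
+ })
+ ```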
+
+ Note 2: Existing users that have already logged in with Apple may need to revoke their access in order to see the email sharing options as shown in [this screenshot](https://github.com/pocketbase/pocketbase/discussions/5074#discussioncomment-9801855).
+ If you want to force the new consent screen you could register a new Apple OAuth2 app.
+
+- ⚠️ Fixed a security vulnerability related to the OAuth2 email autolinking (thanks to @dalurness for reporting it).
+
+ Just to be safe I've also published a [GitHub security advisory](https://github.com/pocketbase/pocketbase/security/advisories/GHSA-m93w-4fxv-r35v) (_may take some time to show up in the related security databases_).
+
+ In order to be exploited you must have **both** OAuth2 and Password auth methods enabled.
+
+ A possible attack scenario could be:
+ - a malicious actor registers with the targeted user's email (it is unverified)
+ - at some later point in time the targeted user stumbles upon your app and decides to sign up with OAuth2 (_this step could also be initiated by the attacker by sending an invite email to the targeted user_)
+ - on successful OAuth2 auth we search for an existing PocketBase user matching the OAuth2 user's email and associate them
+ - because we haven't changed the password of the existing PocketBase user during the linking, the malicious actor has access to the targeted user account and will be able to log in with the initially created email/password
+
+ To prevent this from happening, we now reset the password for this specific case if the previously created user wasn't verified (an exception to this is if the linking is explicit/manual, aka. when you send `Authorization:TOKEN` with the OAuth2 auth call).
+
+ Additionally, to warn users, we now send an email alert in case the user has logged in with a password but has at least one OAuth2 account linked. It looks something like:
+
+ _Hello,
+ Just to let you know that someone has logged in to your Acme account using a password while you already have OAuth2 GitLab auth linked.
+ If you have recently signed in with a password, you may disregard this email.
+ **If you don't recognize the above action, you should immediately change your Acme account password.**
+ Thanks,
+ Acme team_
+
+ The flow will be further improved with the [ongoing refactoring](https://github.com/pocketbase/pocketbase/discussions/4355) and we will start sending emails for "unrecognized device" logins (OTP and MFA are already implemented and will be available with the next v0.23.0 release in the near future).
+
+
+## v0.22.13
+
+- Fixed rules inconsistency for text literals when inside parenthesis ([#5017](https://github.com/pocketbase/pocketbase/issues/5017)).
+
+- Updated Go deps.
+
+
+## v0.22.12
+
+- Fixed calendar picker grid layout misalignment on Firefox ([#4865](https://github.com/pocketbase/pocketbase/issues/4865)).
+
+- Updated Go deps and bumped the min Go version in the GitHub release action to Go 1.22.3 since it comes with [some minor security fixes](https://github.com/golang/go/issues?q=milestone%3AGo1.22.3).
+
+
+## v0.22.11
+
+- Load the full record in the relation picker edit panel ([#4857](https://github.com/pocketbase/pocketbase/issues/4857)).
+
+
+## v0.22.10
+
+- Updated the uploaded filename normalization to take double extensions into consideration ([#4824](https://github.com/pocketbase/pocketbase/issues/4824)).
+
+- Added Collection models cache to help speed up the common List and View request execution by ~25%.
+ _This was extracted from the ongoing work on [#4355](https://github.com/pocketbase/pocketbase/discussions/4355) and there are many other small optimizations already implemented but they will have to wait for the refactoring to be finalized._
+
+
+## v0.22.9
+
+- Fixed Admin UI OAuth2 "Clear all fields" btn action to properly unset all form fields ([#4737](https://github.com/pocketbase/pocketbase/issues/4737)).
+
+
+## v0.22.8
+
+- Fixed '~' auto wildcard wrapping when the param has escaped `%` character ([#4704](https://github.com/pocketbase/pocketbase/discussions/4704)).
+
+- Other minor UI improvements (added `aria-expanded=true/false` to the dropdown triggers, added contrasting border around the default mail template btn style, etc.).
+
+- Updated Go deps and bumped the min Go version in the GitHub release action to Go 1.22.2 since it comes with [some `net/http` security and bug fixes](https://github.com/golang/go/issues?q=milestone%3AGo1.22.2).
+
+
+## v0.22.7
+
+- Replaced the default `s3blob` driver with a trimmed vendored version to reduce the binary size by ~10MB.
+ _It can be further reduced by another ~10MB once we entirely replace the `aws-sdk-go-v2` dependency, but I stumbled on some edge cases related to the headers signing and for now it is on hold._
+
+- Other minor improvements (updated GitLab OAuth2 provider logo [#4650](https://github.com/pocketbase/pocketbase/pull/4650), normalized error messages, updated npm dependencies, etc.)
+
+
+## v0.22.6
+
+- Admin UI accessibility improvements:
+ - Fixed the dropdowns tab/enter/space keyboard navigation ([#4607](https://github.com/pocketbase/pocketbase/issues/4607)).
+ - Added `role`, `aria-label`, `aria-hidden` attributes to some of the elements in an attempt to better assist screen readers.
+
+
+## v0.22.5
+
+- Minor test helpers fixes ([#4600](https://github.com/pocketbase/pocketbase/issues/4600)):
+ - Call the `OnTerminate` hook on `TestApp.Cleanup()`.
+ - Automatically run the DB migrations on initializing the test app with `tests.NewTestApp()`.
+
+- Added a more elaborate warning message when restoring a backup, explaining how the operation works.
+
+- Skip irregular files (symbolic links, sockets, etc.) when restoring a backup zip from the Admin UI or calling `archive.Extract(src, dst)` because they come with too many edge cases and ambiguities.
+
+ More details
+
+ This was initially reported as a security issue (_thanks Harvey Spec_), but in the PocketBase context it is not something that can be exploited without admin intervention, and since the general expectation is that PocketBase admins can do anything and are the ones who manage their server, this should be treated with the same diligence as using `scp`/`rsync`/`rclone`/etc. with untrusted file sources.
+
+ It is not possible (_or at least I'm not aware of how to do that easily_) to perform virus/malicious content scanning on the uploaded backup archive files, and some caution is always required when using the Admin UI or running shell commands, hence the backup-restore warning text.
+
+ **Or in other words, if someone sends you a file and tells you to upload it to your server (either as a backup zip or manually via scp), obviously you shouldn't do that unless you really trust them.**
+
+ PocketBase is like any other regular application that you run on your server and there is no builtin "sandbox" for what the PocketBase process can execute. This is left to the developers to restrict on application or OS level depending on their needs. If you are self-hosting PocketBase you usually don't have to do that, but if you are offering PocketBase as a service and allow strangers to run their own PocketBase instances on your server then you'll need to implement the isolation mechanisms on your own.
+
+
+
+## v0.22.4
+
+- Removed conflicting styles causing the detailed codeblock log data preview to not visualize properly ([#4505](https://github.com/pocketbase/pocketbase/pull/4505)).
+
+- Minor JSVM improvements:
+ - Added `$filesystem.fileFromUrl(url, optSecTimeout)` helper.
+ - Implemented the `FormData` interface and added support for sending `multipart/form-data` requests with `$http.send()` ([#4544](https://github.com/pocketbase/pocketbase/discussions/4544)).
+
+
+## v0.22.3
+
+- Fixed the z-index of the current admin dropdown on Safari ([#4492](https://github.com/pocketbase/pocketbase/issues/4492)).
+
+- Fixed `OnAfterApiError` debug log `nil` error reference ([#4498](https://github.com/pocketbase/pocketbase/issues/4498)).
+
+- Added the field name as part of the `@request.data.someRelField.*` join to handle the case when a collection has 2 or more relation fields pointing to the same place ([#4500](https://github.com/pocketbase/pocketbase/issues/4500)).
+
+- Updated Go deps and bumped the min Go version in the GitHub release action to Go 1.22.1 since it comes with [some security fixes](https://github.com/golang/go/issues?q=milestone%3AGo1.22.1).
+
+
+## v0.22.2
+
+- Fixed a small regression introduced with v0.22.0 that was causing some missing unknown fields to always return an error instead of applying the specific `nullifyMisingField` resolver option to the query.
+
+
+## v0.22.1
+
+- Fixed Admin UI record and collection panels not reinitializing properly on browser back/forward navigation ([#4462](https://github.com/pocketbase/pocketbase/issues/4462)).
+
+- Initialize `RecordAuthWithOAuth2Event.IsNewRecord` for the `OnRecordBeforeAuthWithOAuth2Request` hook ([#4437](https://github.com/pocketbase/pocketbase/discussions/4437)).
+
+- Added error checks to the autogenerated Go migrations ([#4448](https://github.com/pocketbase/pocketbase/issues/4448)).
+
+
+## v0.22.0
+
+- Added Planning Center OAuth2 provider ([#4393](https://github.com/pocketbase/pocketbase/pull/4393); thanks @alxjsn).
+
+- Admin UI improvements:
+ - Autosync collection changes across multiple open browser tabs.
+ - Fixed vertical image popup preview scrolling.
+ - Added options to export a subset of collections.
+ - Added option to import a subset of collections without deleting the others ([#3403](https://github.com/pocketbase/pocketbase/issues/3403)).
+
+- Added support for back/indirect relation `filter`/`sort` (single and multiple).
+ The syntax to reference back relation fields is `yourCollection_via_yourRelField.*`.
+ ⚠️ To avoid excessive joins, the nested relations resolver is now limited to a max depth of 6 levels (the same as `expand`).
+ _Note that in the future there will also be more advanced and granular options to specify a subset of the fields that are filterable/sortable._
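+
+ For example, assuming a `comments` collection with a `post` relation field pointing to a `posts` collection (the collection and field names are illustrative), a `posts` List rule or filter could look like:
+ ```
+ comments_via_post.user ?= @request.auth.id
+ ```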
+
+- Added support for multiple back/indirect relation `expand` and updated the keys to use the `_via_` reference syntax (`yourCollection_via_yourRelField`).
+ _To minimize the breaking changes, the old parenthesis reference syntax (`yourCollection(yourRelField)`) will still continue to work but it is soft-deprecated and there will be a console log reminding you to change it to the new one._
+
+- ⚠️ Collections and fields are no longer allowed to have `_via_` in their name to avoid collisions with the back/indirect relation reference syntax.
+
+- Added `jsvm.Config.OnInit` optional config function to allow registering custom Go bindings to the JSVM.
+
+- Added `@request.context` rule field that can be used to apply a different set of constraints based on the API rule execution context.
+ For example, to disallow user creation via OAuth2 auth, you could set the users collection Create API rule to `@request.context != "oauth2"`.
+ The currently supported `@request.context` values are:
+ ```
+ default
+ realtime
+ protectedFile
+ oauth2
+ ```
+
+- Adjusted the `cron.Start()` to start the ticker at the `00` second of the cron interval ([#4394](https://github.com/pocketbase/pocketbase/discussions/4394)).
+ _Note that the cron format has only minute granularity and there is still no guarantee that the scheduled job will always be executed at the `00` second._
+
+- Fixed auto backups cron not reloading properly after app settings change ([#4431](https://github.com/pocketbase/pocketbase/discussions/4431)).
+
+- Upgraded to `aws-sdk-go-v2` and added special handling for GCS to work around the previous [GCS headers signature issue](https://github.com/pocketbase/pocketbase/issues/2231) that we had with v2.
+ _This should also fix the SVG/JSON zero response when using Cloudflare R2 ([#4287](https://github.com/pocketbase/pocketbase/issues/4287#issuecomment-1925168142), [#2068](https://github.com/pocketbase/pocketbase/discussions/2068), [#2952](https://github.com/pocketbase/pocketbase/discussions/2952))._
+ _⚠️ If you are using S3 for uploaded files or backups, please verify that you have a green check in the Admin UI for your S3 configuration (I've tested the new version with GCS, MinIO, Cloudflare R2 and Wasabi)._
+
+- Added `:each` modifier support for `file` and `relation` type fields (_previously it was supported only for `select` type fields_).
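+ A hypothetical rule example (the field name is illustrative): `@request.data.documents:each ~ ".pdf"`.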
+
+- Other minor improvements (updated the `ghupdate` plugin to use the configured executable name when printing to the console, fixed the error reporting of `admin update/delete` commands, etc.).
+
+
+## v0.21.3
+
+- Ignore the JS required validations for disabled OIDC providers ([#4322](https://github.com/pocketbase/pocketbase/issues/4322)).
+
+- Allow `HEAD` requests to the `/api/health` endpoint ([#4310](https://github.com/pocketbase/pocketbase/issues/4310)).
+
+- Fixed the `editor` field value when visualized inside the View collection preview panel.
+
+- Manually clear all TinyMCE events on editor removal (_workaround for [tinymce#9377](https://github.com/tinymce/tinymce/issues/9377)_).
+
+
+## v0.21.2
+
+- Fixed `@request.auth.*` initialization side-effect which caused the currently authenticated user's email to not be returned in the user auth response ([#2173](https://github.com/pocketbase/pocketbase/issues/2173#issuecomment-1932332038)).
+ _The currently authenticated user's email should always be accessible regardless of the `emailVisibility` state._
+
+- Fixed `RecordUpsert.RemoveFiles` godoc example.
+
+- Bumped the `thumbGenSem` limit to `NumCPU()+2` as some users reported that it was too restrictive.
+
+
+## v0.21.1
+
+- Small fix for the Admin UI related to the _Settings > Sync_ menu not being visible even when the "Hide controls" toggle is off.
+
+
+## v0.21.0
+
+- Added Bitbucket OAuth2 provider ([#3948](https://github.com/pocketbase/pocketbase/pull/3948); thanks @aabajyan).
+
+- Mark user as verified on confirm password reset ([#4066](https://github.com/pocketbase/pocketbase/issues/4066)).
+ _If the user email has changed after issuing the reset token (eg. updated by an admin), then the `verified` user state remains unchanged._
+
+- Added support for loading a serialized json payload for `multipart/form-data` requests using the special `@jsonPayload` key.
+ _This is intended to be used primarily by the SDKs to resolve [js-sdk#274](https://github.com/pocketbase/js-sdk/issues/274)._
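+
+ A rough sketch of what such a multipart body could look like (the boundary, field and file names are illustrative):
+ ```
+ --boundary
+ Content-Disposition: form-data; name="@jsonPayload"
+
+ {"title":"example","active":true}
+ --boundary
+ Content-Disposition: form-data; name="document"; filename="example.pdf"
+ Content-Type: application/pdf
+
+ ...binary data...
+ --boundary--
+ ```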
+
+- Added graceful OAuth2 redirect error handling ([#4177](https://github.com/pocketbase/pocketbase/issues/4177)).
+ _Previously, on a redirect error we directly returned a standard json error response. Now, on a redirect error we'll redirect to a generic OAuth2 failure screen (similar to the success one) and will attempt to auto close the OAuth2 popup._
+ _The SDKs are also updated to handle the OAuth2 redirect error and it will be returned as a Promise rejection of the `authWithOAuth2()` call._
+
+- Exposed `$apis.gzip()` and `$apis.bodyLimit(bytes)` middlewares to the JSVM.
+
+- Added `TestMailer.SentMessages` field that holds all sent test app emails until cleanup.
+
+- Optimized the cascade delete of records with multiple `relation` fields.
+
+- Updated the `serve` and `admin` commands error reporting.
+
+- Minor Admin UI improvements (reduced the min table row height, added option to duplicate fields, added new TinyMCE codesample plugin languages, hide the collection sync settings when the `Settings.Meta.HideControls` is enabled, etc.)
+
+
+## v0.20.7
+
+- Fixed the Admin UI auto indexes update when renaming fields with a common prefix ([#4160](https://github.com/pocketbase/pocketbase/issues/4160)).
+
+
+## v0.20.6
+
+- Fixed JSVM types generation for functions with omitted arg types ([#4145](https://github.com/pocketbase/pocketbase/issues/4145)).
+
+- Updated Go deps.
+
+
## v0.20.5
- Minor CSS fix for the Admin UI to prevent the searchbar within a popup from expanding too much and pushing the controls out of the visible area ([#4079](https://github.com/pocketbase/pocketbase/issues/4079#issuecomment-1876994116)).
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 54c7cf90c..20aa0118d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -10,8 +10,8 @@ This document describes how to prepare a PR for a change in the main repository.
## Prerequisites
-- Go 1.19+ (for making changes in the Go code)
-- Node 16+ (for making changes in the Admin UI)
+- Go 1.21+ (for making changes in the Go code)
+- Node 18+ (for making changes in the Admin UI)
If you haven't already, you can fork the main repository and clone your fork so that you can work locally:
diff --git a/README.md b/README.md
index 60c2aa829..62f2e65a6 100644
--- a/README.md
+++ b/README.md
@@ -44,10 +44,17 @@ go mod download
# 1. postgres://user:pass@localhost/logs?sslmode=disable
# 2. minio: UI runs on port 9001 and API on 9000 (minio123:minio123)
# 2. s3://minio123:minio123@localhost:9000/public
-# (dont forget to manually create bucket called "public" via web ui to establish s3 connection from pocketbase)
+# 2.1. don't forget to manually create a bucket called "public" via the MinIO web UI (localhost:9001, user: minio123, pass: minio123) to establish the S3 connection from PocketBase
+# 2.2. create a new access key at http://localhost:9001/access-keys and save it locally as we will use it later
+# 2.3. configure the S3 file storage from the PocketBase Admin UI http://127.0.0.1:8090/_/?#/settings/storage using the following:
+#      - Endpoint: http://localhost:9000/public
+#      - Bucket: public
+#      - Region: us-east-1
+#      - Access key & secret: use the ones you created at step 2.2
+#      - Save Changes
# 3. mailhog: port: SMTP-1025 and UI-8025
# 3. smtp://localhost:1025 - http://localhost:8025
-docker-compose up -d
+docker compose up -d
# before run the project, you need to create and set RSA Public key pair for JWT before run the application.
# you can use following command to generate RSA key pair
diff --git a/apis/base.go b/apis/base.go
index 383c7e6fa..cd9b4a6a7 100644
--- a/apis/base.go
+++ b/apis/base.go
@@ -28,6 +28,7 @@ const trailedAdminPath = "/_/"
func InitApi(app core.App) (*echo.Echo, error) {
e := echo.New()
e.Debug = false
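+	// custom binder that supports the multipart @jsonPayload key and allows the request body to be read more than once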
+ e.Binder = &rest.MultiBinder{}
e.JSONSerializer = &rest.Serializer{
FieldsParam: fieldsQueryParam,
}
@@ -82,7 +83,7 @@ func InitApi(app core.App) (*echo.Echo, error) {
logRequest(app, c, apiErr)
if c.Response().Committed {
- return // already commited
+ return // already committed
}
event := new(core.ApiErrorEvent)
@@ -105,7 +106,7 @@ func InitApi(app core.App) (*echo.Echo, error) {
if hookErr == nil {
if err := app.OnAfterApiError().Trigger(event); err != nil {
- app.Logger().Debug("OnAfterApiError failure", slog.String("error", hookErr.Error()))
+ app.Logger().Debug("OnAfterApiError failure", slog.String("error", err.Error()))
}
} else {
app.Logger().Debug("OnBeforeApiError error (truly rare case, eg. client already disconnected)", slog.String("error", hookErr.Error()))
diff --git a/apis/base_test.go b/apis/base_test.go
index 6e6dbe626..60f8bb780 100644
--- a/apis/base_test.go
+++ b/apis/base_test.go
@@ -10,6 +10,7 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/apis"
"github.com/AlperRehaYAZGAN/postgresbase/tests"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/rest"
"github.com/labstack/echo/v5"
"github.com/spf13/cast"
)
@@ -220,15 +221,24 @@ func TestRemoveTrailingSlashMiddleware(t *testing.T) {
}
}
-func TestEagerRequestInfoCache(t *testing.T) {
+func TestMultiBinder(t *testing.T) {
t.Parallel()
+ rawJson := `{"name":"test123"}`
+
+ formData, mp, err := tests.MockMultipartData(map[string]string{
+ rest.MultipartJsonKey: rawJson,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
scenarios := []tests.ApiScenario{
{
- Name: "custom non-api group route",
+ Name: "non-api group route",
Method: "POST",
Url: "/custom",
- Body: strings.NewReader(`{"name":"test123"}`),
+ Body: strings.NewReader(rawJson),
BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
e.AddRoute(echo.Route{
Method: "POST",
@@ -242,11 +252,10 @@ func TestEagerRequestInfoCache(t *testing.T) {
return err
}
- // since the unknown method is not eager cache support
- // it should fail reading the json body twice
+ // try to read the body again
r := apis.RequestInfo(c)
- if v := cast.ToString(r.Data["name"]); v != "" {
- t.Fatalf("Expected empty request data body, got, %v", r.Data)
+ if v := cast.ToString(r.Data["name"]); v != "test123" {
+ t.Fatalf("Expected request data with name %q, got, %q", "test123", v)
}
return c.NoContent(200)
@@ -256,10 +265,10 @@ func TestEagerRequestInfoCache(t *testing.T) {
ExpectedStatus: 200,
},
{
- Name: "api group route with unsupported eager cache request method",
+ Name: "api group route",
Method: "GET",
Url: "/api/admins",
- Body: strings.NewReader(`{"name":"test123"}`),
+ Body: strings.NewReader(rawJson),
BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
e.Use(func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
@@ -273,11 +282,10 @@ func TestEagerRequestInfoCache(t *testing.T) {
}{}
c.Bind(data)
- // since the unknown method is not eager cache support
- // it should fail reading the json body twice
+ // try to read the body again
r := apis.RequestInfo(c)
- if v := cast.ToString(r.Data["name"]); v != "" {
- t.Fatalf("Expected empty request data body, got, %v", r.Data)
+ if v := cast.ToString(r.Data["name"]); v != "test123" {
+ t.Fatalf("Expected request data with name %q, got, %q", "test123", v)
}
return nil
@@ -287,22 +295,25 @@ func TestEagerRequestInfoCache(t *testing.T) {
ExpectedStatus: 200,
},
{
- Name: "api group route with supported eager cache request method",
+ Name: "custom route with @jsonPayload as multipart body",
Method: "POST",
- Url: "/api/admins",
- Body: strings.NewReader(`{"name":"test123"}`),
+ Url: "/custom",
+ Body: formData,
+ RequestHeaders: map[string]string{
+ "Content-Type": mp.FormDataContentType(),
+ },
BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
- e.Use(func(next echo.HandlerFunc) echo.HandlerFunc {
- return func(c echo.Context) error {
- // it is not important whether the route handler return an error since
- // we just need to ensure that the eagerRequestInfoCache was registered
- next(c)
-
- // ensure that the body was read at least once
+ e.AddRoute(echo.Route{
+ Method: "POST",
+ Path: "/custom",
+ Handler: func(c echo.Context) error {
data := &struct {
Name string `json:"name"`
}{}
- c.Bind(data)
+
+ if err := c.Bind(data); err != nil {
+ return err
+ }
// try to read the body again
r := apis.RequestInfo(c)
@@ -310,8 +321,8 @@ func TestEagerRequestInfoCache(t *testing.T) {
t.Fatalf("Expected request data with name %q, got, %q", "test123", v)
}
- return nil
- }
+ return c.NoContent(200)
+ },
})
},
ExpectedStatus: 200,
diff --git a/apis/collection_test.go b/apis/collection_test.go
index 9203a4974..6e32bdf88 100644
--- a/apis/collection_test.go
+++ b/apis/collection_test.go
@@ -893,7 +893,8 @@ func TestCollectionUpdate(t *testing.T) {
{"type":"text","name":"password"},
{"type":"text","name":"passwordConfirm"},
{"type":"text","name":"oldPassword"}
- ]
+ ],
+ "indexes": []
}`),
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
diff --git a/apis/file.go b/apis/file.go
index 267a90b06..4f6da576b 100644
--- a/apis/file.go
+++ b/apis/file.go
@@ -30,7 +30,7 @@ var defaultThumbSizes = []string{"100x100"}
func bindFileApi(app core.App, rg *echo.Group) {
api := fileApi{
app: app,
- thumbGenSem: semaphore.NewWeighted(int64(runtime.NumCPU() + 1)), // the value is arbitrary chosen and may change in the future
+ thumbGenSem: semaphore.NewWeighted(int64(runtime.NumCPU() + 2)), // the value is arbitrarily chosen and may change in the future
thumbGenPending: new(singleflight.Group),
thumbGenMaxWait: 60 * time.Second,
}
@@ -111,7 +111,7 @@ func (api *fileApi) download(c echo.Context) error {
options, ok := fileField.Options.(*schema.FileOptions)
if !ok {
- return NewBadRequestError("", errors.New("Failed to load file options."))
+ return NewBadRequestError("", errors.New("failed to load file options"))
}
// check whether the request is authorized to view the protected file
@@ -122,6 +122,7 @@ func (api *fileApi) download(c echo.Context) error {
// create a copy of the cached request data and adjust it for the current auth model
requestInfo := *RequestInfo(c)
+ requestInfo.Context = models.RequestInfoContextProtectedFile
requestInfo.Admin = nil
requestInfo.AuthRecord = nil
if adminOrAuthRecord != nil {
diff --git a/apis/health.go b/apis/health.go
index e066d0607..c41e45b75 100644
--- a/apis/health.go
+++ b/apis/health.go
@@ -12,6 +12,7 @@ func bindHealthApi(app core.App, rg *echo.Group) {
api := healthApi{app: app}
subGroup := rg.Group("/health")
+ subGroup.HEAD("", api.healthCheck)
subGroup.GET("", api.healthCheck)
}
@@ -20,8 +21,8 @@ type healthApi struct {
}
type healthCheckResponse struct {
- Code int `json:"code"`
Message string `json:"message"`
+ Code int `json:"code"`
Data struct {
CanBackup bool `json:"canBackup"`
} `json:"data"`
@@ -29,6 +30,10 @@ type healthCheckResponse struct {
// healthCheck returns a 200 OK response if the server is healthy.
func (api *healthApi) healthCheck(c echo.Context) error {
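+	// respond to HEAD requests with only the status code (no JSON body)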
+ if c.Request().Method == http.MethodHead {
+ return c.NoContent(http.StatusOK)
+ }
+
resp := new(healthCheckResponse)
resp.Code = http.StatusOK
resp.Message = "API is healthy."
diff --git a/apis/health_test.go b/apis/health_test.go
index 296153964..8a49c3973 100644
--- a/apis/health_test.go
+++ b/apis/health_test.go
@@ -12,7 +12,13 @@ func TestHealthAPI(t *testing.T) {
scenarios := []tests.ApiScenario{
{
- Name: "health status returns 200",
+ Name: "HEAD health status",
+ Method: http.MethodHead,
+ Url: "/api/health",
+ ExpectedStatus: 200,
+ },
+ {
+ Name: "GET health status",
Method: http.MethodGet,
Url: "/api/health",
ExpectedStatus: 200,
diff --git a/apis/middlewares.go b/apis/middlewares.go
index ea94da2b6..8a491a0f6 100644
--- a/apis/middlewares.go
+++ b/apis/middlewares.go
@@ -261,7 +261,7 @@ func LoadCollectionContext(app core.App, optCollectionTypes ...string) echo.Midd
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
if param := c.PathParam("collection"); param != "" {
- collection, err := app.Dao().FindCollectionByNameOrId(param)
+ collection, err := core.FindCachedCollectionByNameOrId(app, param)
if err != nil || collection == nil {
return NewNotFoundError("", err)
}
@@ -402,6 +402,8 @@ func realUserIp(r *http.Request, fallbackIp string) string {
return fallbackIp
}
+// @todo consider removing as this may no longer be needed due to the custom rest.MultiBinder.
+//
// eagerRequestInfoCache ensures that the request data is cached in the request
// context to allow reading for example the json request body data more than once.
func eagerRequestInfoCache(app core.App) echo.MiddlewareFunc {
diff --git a/apis/realtime.go b/apis/realtime.go
index f31c6f961..26c3cee5a 100644
--- a/apis/realtime.go
+++ b/apis/realtime.go
@@ -371,7 +371,7 @@ type recordData struct {
func (api *realtimeApi) broadcastRecord(action string, record *models.Record, dryCache bool) error {
collection := record.Collection()
if collection == nil {
- return errors.New("[broadcastRecord] Record collection not set.")
+ return errors.New("[broadcastRecord] Record collection not set")
}
clients := api.app.SubscriptionsBroker().Clients()
@@ -409,6 +409,7 @@ func (api *realtimeApi) broadcastRecord(action string, record *models.Record, dr
// mock request data
requestInfo := &models.RequestInfo{
+ Context: models.RequestInfoContextRealtime,
Method: "GET",
Query: options.Query,
Headers: options.Headers,
diff --git a/apis/record_auth.go b/apis/record_auth.go
index 1072cb75d..a66f52b28 100644
--- a/apis/record_auth.go
+++ b/apis/record_auth.go
@@ -7,17 +7,21 @@ import (
"log/slog"
"net/http"
"sort"
+ "time"
"github.com/AlperRehaYAZGAN/postgresbase/core"
"github.com/AlperRehaYAZGAN/postgresbase/daos"
"github.com/AlperRehaYAZGAN/postgresbase/forms"
+ "github.com/AlperRehaYAZGAN/postgresbase/mails"
"github.com/AlperRehaYAZGAN/postgresbase/models"
+ "github.com/AlperRehaYAZGAN/postgresbase/models/schema"
"github.com/AlperRehaYAZGAN/postgresbase/resolvers"
"github.com/AlperRehaYAZGAN/postgresbase/tools/auth"
"github.com/AlperRehaYAZGAN/postgresbase/tools/routine"
"github.com/AlperRehaYAZGAN/postgresbase/tools/search"
"github.com/AlperRehaYAZGAN/postgresbase/tools/security"
"github.com/AlperRehaYAZGAN/postgresbase/tools/subscriptions"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/types"
"github.com/labstack/echo/v5"
"github.com/pocketbase/dbx"
"golang.org/x/oauth2"
@@ -30,6 +34,7 @@ func bindRecordAuthApi(app core.App, rg *echo.Group) {
// global oauth2 subscription redirect handler
rg.GET("/oauth2-redirect", api.oauth2SubscriptionRedirect)
+ rg.POST("/oauth2-redirect", api.oauth2SubscriptionRedirect) // needed in case of response_mode=form_post
// common collection record related routes
subGroup := rg.Group(
@@ -117,7 +122,7 @@ func (api *recordAuthApi) authMethods(c echo.Context) error {
provider, err := auth.NewProviderByName(name)
if err != nil {
- api.app.Logger().Debug("Missing or invalid provier name", slog.String("name", name))
+ api.app.Logger().Debug("Missing or invalid provider name", slog.String("name", name))
continue // skip provider
}
@@ -146,7 +151,7 @@ func (api *recordAuthApi) authMethods(c echo.Context) error {
switch name {
case auth.NameApple:
// see https://developer.apple.com/documentation/sign_in_with_apple/sign_in_with_apple_js/incorporating_sign_in_with_apple_into_other_platforms#3332113
- urlOpts = append(urlOpts, oauth2.SetAuthURLParam("response_mode", "query"))
+ urlOpts = append(urlOpts, oauth2.SetAuthURLParam("response_mode", "form_post"))
}
if provider.PKCE() {
@@ -201,13 +206,14 @@ func (api *recordAuthApi) authWithOAuth2(c echo.Context) error {
event.HttpContext = c
event.Collection = collection
event.ProviderName = form.Provider
- event.IsNewRecord = false
form.SetBeforeNewRecordCreateFunc(func(createForm *forms.RecordUpsert, authRecord *models.Record, authUser *auth.AuthUser) error {
return createForm.DrySubmit(func(txDao *daos.Dao) error {
event.IsNewRecord = true
+
// clone the current request data and assign the form create data as its body data
requestInfo := *RequestInfo(c)
+ requestInfo.Context = models.RequestInfoContextOAuth2
requestInfo.Data = form.CreateData
createRuleFunc := func(q *dbx.SelectQuery) error {
@@ -246,6 +252,7 @@ func (api *recordAuthApi) authWithOAuth2(c echo.Context) error {
event.Record = data.Record
event.OAuth2User = data.OAuth2User
event.ProviderClient = data.ProviderClient
+ event.IsNewRecord = data.Record == nil
return api.app.OnRecordBeforeAuthWithOAuth2Request().Trigger(event, func(e *core.RecordAuthWithOAuth2Event) error {
data.Record = e.Record
@@ -267,6 +274,14 @@ func (api *recordAuthApi) authWithOAuth2(c echo.Context) error {
}
return api.app.OnRecordAfterAuthWithOAuth2Request().Trigger(event, func(e *core.RecordAuthWithOAuth2Event) error {
+ // clear the lastLoginAlertSentAt field so that we can enforce password auth notifications
+ if !e.Record.LastLoginAlertSentAt().IsZero() {
+ e.Record.Set(schema.FieldNameLastLoginAlertSentAt, "")
+ if err := api.app.Dao().SaveRecord(e.Record); err != nil {
+ api.app.Logger().Warn("Failed to reset lastLoginAlertSentAt", "error", err, "recordId", e.Record.Id)
+ }
+ }
+
return RecordAuthResponse(api.app, e.HttpContext, e.Record, meta)
})
})
@@ -302,6 +317,42 @@ func (api *recordAuthApi) authWithPassword(c echo.Context) error {
return NewBadRequestError("Failed to authenticate.", err)
}
+ // @todo remove after the refactoring
+ if collection.AuthOptions().AllowOAuth2Auth && e.Record.Email() != "" {
+ externalAuths, err := api.app.Dao().FindAllExternalAuthsByRecord(e.Record)
+ if err != nil {
+ return NewBadRequestError("Failed to authenticate.", err)
+ }
+ if len(externalAuths) > 0 {
+ lastLoginAlert := e.Record.LastLoginAlertSentAt().Time()
+
+ // send an email alert if the password auth is after OAuth2 auth (lastLoginAlert will be empty)
+ // or if it has been ~7 days since the last alert
+ if lastLoginAlert.IsZero() || time.Now().UTC().Sub(lastLoginAlert).Hours() > 168 {
+ providerNames := make([]string, len(externalAuths))
+ for i, ea := range externalAuths {
+ var name string
+ if provider, err := auth.NewProviderByName(ea.Provider); err == nil {
+ name = provider.DisplayName()
+ }
+ if name == "" {
+ name = ea.Provider
+ }
+ providerNames[i] = name
+ }
+
+ if err := mails.SendRecordPasswordLoginAlert(api.app, e.Record, providerNames...); err != nil {
+ return NewBadRequestError("Failed to authenticate.", err)
+ }
+
+ e.Record.SetLastLoginAlertSentAt(types.NowDateTime())
+ if err := api.app.Dao().SaveRecord(e.Record); err != nil {
+ api.app.Logger().Warn("Failed to update lastLoginAlertSentAt", "error", err, "recordId", e.Record.Id)
+ }
+ }
+ }
+ }
+
return api.app.OnRecordAfterAuthWithPasswordRequest().Trigger(event, func(e *core.RecordAuthWithPasswordEvent) error {
return RecordAuthResponse(api.app, e.HttpContext, e.Record, nil)
})
@@ -656,29 +707,46 @@ func (api *recordAuthApi) unlinkExternalAuth(c echo.Context) error {
// -------------------------------------------------------------------
-const oauth2SubscriptionTopic = "@oauth2"
+const (
+ oauth2SubscriptionTopic string = "@oauth2"
+ oauth2RedirectFailurePath string = "../_/#/auth/oauth2-redirect-failure"
+ oauth2RedirectSuccessPath string = "../_/#/auth/oauth2-redirect-success"
+)
+
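+// oauth2RedirectData defines the OAuth2 redirect parameters accepted via query (GET) or form body (POST, e.g. when response_mode=form_post).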
+type oauth2RedirectData struct {
+ State string `form:"state" query:"state" json:"state"`
+ Code string `form:"code" query:"code" json:"code"`
+ Error string `form:"error" query:"error" json:"error,omitempty"`
+}
func (api *recordAuthApi) oauth2SubscriptionRedirect(c echo.Context) error {
- state := c.QueryParam("state")
- code := c.QueryParam("code")
+ redirectStatusCode := http.StatusTemporaryRedirect
+ if c.Request().Method != http.MethodGet {
+ redirectStatusCode = http.StatusSeeOther
+ }
- if code == "" || state == "" {
- return NewBadRequestError("Invalid OAuth2 redirect parameters.", nil)
+ data := oauth2RedirectData{}
+ if err := c.Bind(&data); err != nil {
+ api.app.Logger().Debug("Failed to read OAuth2 redirect data", "error", err)
+ return c.Redirect(redirectStatusCode, oauth2RedirectFailurePath)
}
- client, err := api.app.SubscriptionsBroker().ClientById(state)
- if err != nil || client.IsDiscarded() || !client.HasSubscription(oauth2SubscriptionTopic) {
- return NewNotFoundError("Missing or invalid OAuth2 subscription client.", err)
+ if data.State == "" {
+ api.app.Logger().Debug("Missing OAuth2 state parameter")
+ return c.Redirect(redirectStatusCode, oauth2RedirectFailurePath)
}
- data := map[string]string{
- "state": state,
- "code": code,
+ client, err := api.app.SubscriptionsBroker().ClientById(data.State)
+ if err != nil || client.IsDiscarded() || !client.HasSubscription(oauth2SubscriptionTopic) {
+ api.app.Logger().Debug("Missing or invalid OAuth2 subscription client", "error", err, "clientId", data.State)
+ return c.Redirect(redirectStatusCode, oauth2RedirectFailurePath)
}
+ defer client.Unsubscribe(oauth2SubscriptionTopic)
encodedData, err := json.Marshal(data)
if err != nil {
- return NewBadRequestError("Failed to marshalize OAuth2 redirect data.", err)
+ api.app.Logger().Debug("Failed to marshalize OAuth2 redirect data", "error", err)
+ return c.Redirect(redirectStatusCode, oauth2RedirectFailurePath)
}
msg := subscriptions.Message{
@@ -688,5 +756,10 @@ func (api *recordAuthApi) oauth2SubscriptionRedirect(c echo.Context) error {
client.Send(msg)
- return c.Redirect(http.StatusTemporaryRedirect, "../_/#/auth/oauth2-redirect")
+ if data.Error != "" || data.Code == "" {
+ api.app.Logger().Debug("Failed OAuth2 redirect due to an error or missing code parameter", "error", data.Error, "clientId", data.State)
+ return c.Redirect(redirectStatusCode, oauth2RedirectFailurePath)
+ }
+
+ return c.Redirect(redirectStatusCode, oauth2RedirectSuccessPath)
}
diff --git a/apis/record_auth_test.go b/apis/record_auth_test.go
index a276664e4..b0c25ca34 100644
--- a/apis/record_auth_test.go
+++ b/apis/record_auth_test.go
@@ -237,6 +237,9 @@ func TestRecordAuthWithPassword(t *testing.T) {
"OnRecordBeforeAuthWithPasswordRequest": 1,
"OnRecordAfterAuthWithPasswordRequest": 1,
"OnRecordAuthRequest": 1,
+ // lastLoginAlertSentAt update
+ "OnModelAfterUpdate": 1,
+ "OnModelBeforeUpdate": 1,
},
},
@@ -304,6 +307,9 @@ func TestRecordAuthWithPassword(t *testing.T) {
"OnRecordBeforeAuthWithPasswordRequest": 1,
"OnRecordAfterAuthWithPasswordRequest": 1,
"OnRecordAuthRequest": 1,
+ // lastLoginAlertSentAt update
+ "OnModelAfterUpdate": 1,
+ "OnModelBeforeUpdate": 1,
},
},
{
@@ -328,6 +334,9 @@ func TestRecordAuthWithPassword(t *testing.T) {
"OnRecordBeforeAuthWithPasswordRequest": 1,
"OnRecordAfterAuthWithPasswordRequest": 1,
"OnRecordAuthRequest": 1,
+ // lastLoginAlertSentAt update
+ "OnModelAfterUpdate": 1,
+ "OnModelBeforeUpdate": 1,
},
},
@@ -644,7 +653,7 @@ func TestRecordAuthConfirmPasswordReset(t *testing.T) {
},
},
{
- Name: "valid token and data",
+ Name: "valid token and data (unverified user)",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-password-reset",
Body: strings.NewReader(`{
@@ -659,6 +668,132 @@ func TestRecordAuthConfirmPasswordReset(t *testing.T) {
"OnRecordBeforeConfirmPasswordResetRequest": 1,
"OnRecordAfterConfirmPasswordResetRequest": 1,
},
+ BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ user, err := app.Dao().FindAuthRecordByEmail("users", "test@example.com")
+ if err != nil {
+ t.Fatalf("Failed to fetch confirm password user: %v", err)
+ }
+
+ if user.Verified() {
+ t.Fatalf("Expected the user to be unverified")
+ }
+ },
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ user, err := app.Dao().FindAuthRecordByToken(
+ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
+ app.Settings().RecordPasswordResetToken.Secret,
+ )
+ if err == nil {
+ t.Fatalf("Expected the password reset token to be invalidated")
+ }
+
+ user, err = app.Dao().FindAuthRecordByEmail("users", "test@example.com")
+ if err != nil {
+ t.Fatalf("Failed to fetch confirm password user: %v", err)
+ }
+
+ if !user.Verified() {
+ t.Fatalf("Expected the user to be marked as verified")
+ }
+ },
+ },
+ {
+ Name: "valid token and data (unverified user with different email from the one in the token)",
+ Method: http.MethodPost,
+ Url: "/api/collections/users/confirm-password-reset",
+ Body: strings.NewReader(`{
+ "token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
+ "password":"12345678",
+ "passwordConfirm":"12345678"
+ }`),
+ ExpectedStatus: 204,
+ ExpectedEvents: map[string]int{
+ "OnModelAfterUpdate": 1,
+ "OnModelBeforeUpdate": 1,
+ "OnRecordBeforeConfirmPasswordResetRequest": 1,
+ "OnRecordAfterConfirmPasswordResetRequest": 1,
+ },
+ BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ user, err := app.Dao().FindAuthRecordByEmail("users", "test@example.com")
+ if err != nil {
+ t.Fatalf("Failed to fetch confirm password user: %v", err)
+ }
+
+ if user.Verified() {
+ t.Fatalf("Expected the user to be unverified")
+ }
+
+ // manually change the email to check whether the verified state will be updated
+ user.SetEmail("test_update@example.com")
+ if err := app.Dao().WithoutHooks().SaveRecord(user); err != nil {
+ t.Fatalf("Failed to update user test email")
+ }
+ },
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ user, err := app.Dao().FindAuthRecordByToken(
+ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
+ app.Settings().RecordPasswordResetToken.Secret,
+ )
+ if err == nil {
+ t.Fatalf("Expected the password reset token to be invalidated")
+ }
+
+ user, err = app.Dao().FindAuthRecordByEmail("users", "test_update@example.com")
+ if err != nil {
+ t.Fatalf("Failed to fetch confirm password user: %v", err)
+ }
+
+ if user.Verified() {
+ t.Fatalf("Expected the user to remain unverified")
+ }
+ },
+ },
+ {
+ Name: "valid token and data (verified user)",
+ Method: http.MethodPost,
+ Url: "/api/collections/users/confirm-password-reset",
+ Body: strings.NewReader(`{
+ "token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
+ "password":"12345678",
+ "passwordConfirm":"12345678"
+ }`),
+ ExpectedStatus: 204,
+ ExpectedEvents: map[string]int{
+ "OnModelAfterUpdate": 1,
+ "OnModelBeforeUpdate": 1,
+ "OnRecordBeforeConfirmPasswordResetRequest": 1,
+ "OnRecordAfterConfirmPasswordResetRequest": 1,
+ },
+ BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ user, err := app.Dao().FindAuthRecordByEmail("users", "test@example.com")
+ if err != nil {
+ t.Fatalf("Failed to fetch confirm password user: %v", err)
+ }
+
+ // ensure that the user is already verified
+ user.SetVerified(true)
+ if err := app.Dao().WithoutHooks().SaveRecord(user); err != nil {
+ t.Fatalf("Failed to update user verified state")
+ }
+ },
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ user, err := app.Dao().FindAuthRecordByToken(
+ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
+ app.Settings().RecordPasswordResetToken.Secret,
+ )
+ if err == nil {
+ t.Fatalf("Expected the password reset token to be invalidated")
+ }
+
+ user, err = app.Dao().FindAuthRecordByEmail("users", "test@example.com")
+ if err != nil {
+ t.Fatalf("Failed to fetch confirm password user: %v", err)
+ }
+
+ if !user.Verified() {
+ t.Fatalf("Expected the user to remain verified")
+ }
+ },
},
{
Name: "OnRecordAfterConfirmPasswordResetRequest error response",
@@ -1377,114 +1512,227 @@ func TestRecordAuthUnlinkExternalsAuth(t *testing.T) {
func TestRecordAuthOAuth2Redirect(t *testing.T) {
t.Parallel()
- c1 := subscriptions.NewDefaultClient()
+ clientStubs := make([]map[string]subscriptions.Client, 0, 10)
+
+ for i := 0; i < 10; i++ {
+ c1 := subscriptions.NewDefaultClient()
+
+ c2 := subscriptions.NewDefaultClient()
+ c2.Subscribe("@oauth2")
+
+ c3 := subscriptions.NewDefaultClient()
+ c3.Subscribe("test1", "@oauth2")
+
+ c4 := subscriptions.NewDefaultClient()
+ c4.Subscribe("test1", "test2")
+
+ c5 := subscriptions.NewDefaultClient()
+ c5.Subscribe("@oauth2")
+ c5.Discard()
+
+ clientStubs = append(clientStubs, map[string]subscriptions.Client{
+ "c1": c1,
+ "c2": c2,
+ "c3": c3,
+ "c4": c4,
+ "c5": c5,
+ })
+ }
- c2 := subscriptions.NewDefaultClient()
- c2.Subscribe("@oauth2")
+ checkFailureRedirect := func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ loc := res.Header.Get("Location")
+ if !strings.Contains(loc, "/oauth2-redirect-failure") {
+ t.Fatalf("Expected failure redirect, got %q", loc)
+ }
+ }
- c3 := subscriptions.NewDefaultClient()
- c3.Subscribe("test1", "@oauth2")
+ checkSuccessRedirect := func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ loc := res.Header.Get("Location")
+ if !strings.Contains(loc, "/oauth2-redirect-success") {
+ t.Fatalf("Expected success redirect, got %q", loc)
+ }
+ }
- c4 := subscriptions.NewDefaultClient()
- c4.Subscribe("test1", "test2")
+ checkClientMessages := func(t *testing.T, clientId string, msg subscriptions.Message, expectedMessages map[string][]string) {
+ if len(expectedMessages[clientId]) == 0 {
+ t.Fatalf("Unexpected client %q message, got %s:\n%s", clientId, msg.Name, msg.Data)
+ }
- c5 := subscriptions.NewDefaultClient()
- c5.Subscribe("@oauth2")
- c5.Discard()
+ if msg.Name != "@oauth2" {
+ t.Fatalf("Expected @oauth2 msg.Name, got %q", msg.Name)
+ }
- beforeTestFunc := func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
- app.SubscriptionsBroker().Register(c1)
- app.SubscriptionsBroker().Register(c2)
- app.SubscriptionsBroker().Register(c3)
- app.SubscriptionsBroker().Register(c4)
- app.SubscriptionsBroker().Register(c5)
+ for _, txt := range expectedMessages[clientId] {
+ if !strings.Contains(string(msg.Data), txt) {
+ t.Fatalf("Failed to find %q in \n%s", txt, msg.Data)
+ }
+ }
+ }
+
+ beforeTestFunc := func(
+ clients map[string]subscriptions.Client,
+ expectedMessages map[string][]string,
+ ) func(*testing.T, *tests.TestApp, *echo.Echo) {
+ return func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ for _, client := range clients {
+ app.SubscriptionsBroker().Register(client)
+ }
+
+ ctx, cancelFunc := context.WithTimeout(context.Background(), 100*time.Millisecond)
+
+ // add to the app store so that it can be cancelled manually after test completion
+ app.Store().Set("cancelFunc", cancelFunc)
+
+ go func() {
+ defer cancelFunc()
+
+ for {
+ select {
+ case msg := <-clients["c1"].Channel():
+ checkClientMessages(t, "c1", msg, expectedMessages)
+ case msg := <-clients["c2"].Channel():
+ checkClientMessages(t, "c2", msg, expectedMessages)
+ case msg := <-clients["c3"].Channel():
+ checkClientMessages(t, "c3", msg, expectedMessages)
+ case msg := <-clients["c4"].Channel():
+ checkClientMessages(t, "c4", msg, expectedMessages)
+ case msg := <-clients["c5"].Channel():
+ checkClientMessages(t, "c5", msg, expectedMessages)
+ case <-ctx.Done():
+ for _, c := range clients {
+ close(c.Channel())
+ }
+ return
+ }
+ }
+ }()
+ }
}
scenarios := []tests.ApiScenario{
{
- Name: "no state query param",
- Method: http.MethodGet,
- Url: "/api/oauth2-redirect?code=123",
- ExpectedStatus: 400,
- ExpectedContent: []string{`"data":{}`},
+ Name: "no state query param",
+ Method: http.MethodGet,
+ Url: "/api/oauth2-redirect?code=123",
+ BeforeTestFunc: beforeTestFunc(clientStubs[0], nil),
+ ExpectedStatus: http.StatusTemporaryRedirect,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkFailureRedirect(t, app, res)
+ },
},
{
- Name: "no code query param",
- Method: http.MethodGet,
- Url: "/api/oauth2-redirect?state=" + c3.Id(),
- ExpectedStatus: 400,
- ExpectedContent: []string{`"data":{}`},
+ Name: "invalid or missing client",
+ Method: http.MethodGet,
+ Url: "/api/oauth2-redirect?code=123&state=missing",
+ BeforeTestFunc: beforeTestFunc(clientStubs[1], nil),
+ ExpectedStatus: http.StatusTemporaryRedirect,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkFailureRedirect(t, app, res)
+ },
},
{
- Name: "missing client",
- Method: http.MethodGet,
- Url: "/api/oauth2-redirect?code=123&state=missing",
- ExpectedStatus: 404,
- ExpectedContent: []string{`"data":{}`},
+ Name: "no code query param",
+ Method: http.MethodGet,
+ Url: "/api/oauth2-redirect?state=" + clientStubs[2]["c3"].Id(),
+ BeforeTestFunc: beforeTestFunc(clientStubs[2], map[string][]string{
+ "c3": {`"state":"` + clientStubs[2]["c3"].Id(), `"code":""`},
+ }),
+ ExpectedStatus: http.StatusTemporaryRedirect,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkFailureRedirect(t, app, res)
+
+ if clientStubs[2]["c3"].HasSubscription("@oauth2") {
+ t.Fatalf("Expected oauth2 subscription to be removed")
+ }
+ },
},
{
- Name: "discarded client with @oauth2 subscription",
- Method: http.MethodGet,
- Url: "/api/oauth2-redirect?code=123&state=" + c5.Id(),
- BeforeTestFunc: beforeTestFunc,
- ExpectedStatus: 404,
- ExpectedContent: []string{`"data":{}`},
+ Name: "error query param",
+ Method: http.MethodGet,
+ Url: "/api/oauth2-redirect?error=example&code=123&state=" + clientStubs[3]["c3"].Id(),
+ BeforeTestFunc: beforeTestFunc(clientStubs[3], map[string][]string{
+ "c3": {`"state":"` + clientStubs[3]["c3"].Id(), `"code":"123"`, `"error":"example"`},
+ }),
+ ExpectedStatus: http.StatusTemporaryRedirect,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkFailureRedirect(t, app, res)
+
+ if clientStubs[3]["c3"].HasSubscription("@oauth2") {
+ t.Fatalf("Expected oauth2 subscription to be removed")
+ }
+ },
},
{
- Name: "client without @oauth2 subscription",
- Method: http.MethodGet,
- Url: "/api/oauth2-redirect?code=123&state=" + c4.Id(),
- BeforeTestFunc: beforeTestFunc,
- ExpectedStatus: 404,
- ExpectedContent: []string{`"data":{}`},
+ Name: "discarded client with @oauth2 subscription",
+ Method: http.MethodGet,
+ Url: "/api/oauth2-redirect?code=123&state=" + clientStubs[4]["c5"].Id(),
+ BeforeTestFunc: beforeTestFunc(clientStubs[4], nil),
+ ExpectedStatus: http.StatusTemporaryRedirect,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkFailureRedirect(t, app, res)
+ },
+ },
+ {
+ Name: "client without @oauth2 subscription",
+ Method: http.MethodGet,
+ Url: "/api/oauth2-redirect?code=123&state=" + clientStubs[4]["c4"].Id(),
+ BeforeTestFunc: beforeTestFunc(clientStubs[5], nil),
+ ExpectedStatus: http.StatusTemporaryRedirect,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkFailureRedirect(t, app, res)
+ },
},
{
Name: "client with @oauth2 subscription",
Method: http.MethodGet,
- Url: "/api/oauth2-redirect?code=123&state=" + c3.Id(),
- BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
- beforeTestFunc(t, app, e)
-
- ctx, cancelFunc := context.WithTimeout(context.Background(), 1*time.Second)
-
- go func() {
- defer cancelFunc()
- L:
- for {
- select {
- case <-c1.Channel():
- t.Error("Unexpected c1 message")
- break L
- case <-c2.Channel():
- t.Error("Unexpected c2 message")
- break L
- case msg := <-c3.Channel():
- if msg.Name != "@oauth2" {
- t.Errorf("Expected @oauth2 msg.Name, got %q", msg.Name)
- }
-
- expectedParams := []string{`"state"`, `"code"`}
- for _, p := range expectedParams {
- if !strings.Contains(string(msg.Data), p) {
- t.Errorf("Couldn't find %s in \n%v", p, msg.Data)
- }
- }
-
- break L
- case <-c4.Channel():
- t.Error("Unexpected c4 message")
- break L
- case <-c5.Channel():
- t.Error("Unexpected c5 message")
- break L
- case <-ctx.Done():
- t.Error("Context timeout reached")
- break L
- }
- }
- }()
- },
+ Url: "/api/oauth2-redirect?code=123&state=" + clientStubs[6]["c3"].Id(),
+ BeforeTestFunc: beforeTestFunc(clientStubs[6], map[string][]string{
+ "c3": {`"state":"` + clientStubs[6]["c3"].Id(), `"code":"123"`},
+ }),
ExpectedStatus: http.StatusTemporaryRedirect,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkSuccessRedirect(t, app, res)
+
+ if clientStubs[6]["c3"].HasSubscription("@oauth2") {
+ t.Fatalf("Expected oauth2 subscription to be removed")
+ }
+ },
+ },
+ {
+ Name: "(POST) client with @oauth2 subscription",
+ Method: http.MethodPost,
+ Url: "/api/oauth2-redirect",
+ Body: strings.NewReader("code=123&state=" + clientStubs[7]["c3"].Id()),
+ RequestHeaders: map[string]string{
+ "content-type": "application/x-www-form-urlencoded",
+ },
+ BeforeTestFunc: beforeTestFunc(clientStubs[7], map[string][]string{
+ "c3": {`"state":"` + clientStubs[7]["c3"].Id(), `"code":"123"`},
+ }),
+ ExpectedStatus: http.StatusSeeOther,
+ AfterTestFunc: func(t *testing.T, app *tests.TestApp, res *http.Response) {
+ app.Store().Get("cancelFunc").(context.CancelFunc)()
+
+ checkSuccessRedirect(t, app, res)
+
+ if clientStubs[7]["c3"].HasSubscription("@oauth2") {
+ t.Fatalf("Expected oauth2 subscription to be removed")
+ }
+ },
},
}
diff --git a/apis/record_crud.go b/apis/record_crud.go
index 43fd422b7..4add695d0 100644
--- a/apis/record_crud.go
+++ b/apis/record_crud.go
@@ -185,6 +185,11 @@ func (api *recordApi) create(c echo.Context) error {
return NewBadRequestError("Failed to load the submitted data due to invalid formatting.", err)
}
+ // force unset the verified state to prevent ManageRule misuse
+ if !hasFullManageAccess {
+ testForm.Verified = false
+ }
+
createRuleFunc := func(q *dbx.SelectQuery) error {
if *collection.CreateRule == "" {
return nil // no create rule to resolve
diff --git a/apis/record_crud_test.go b/apis/record_crud_test.go
index d19fbc48a..4142f3ce1 100644
--- a/apis/record_crud_test.go
+++ b/apis/record_crud_test.go
@@ -13,6 +13,8 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/core"
"github.com/AlperRehaYAZGAN/postgresbase/models"
"github.com/AlperRehaYAZGAN/postgresbase/tests"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/rest"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/types"
"github.com/labstack/echo/v5"
)
@@ -1030,6 +1032,20 @@ func TestRecordCrudCreate(t *testing.T) {
t.Fatal(err)
}
+ formData2, mp2, err2 := tests.MockMultipartData(map[string]string{
+ rest.MultipartJsonKey: `{"title": "title_test2", "testPayload": 123}`,
+ }, "files")
+ if err2 != nil {
+ t.Fatal(err2)
+ }
+
+ formData3, mp3, err3 := tests.MockMultipartData(map[string]string{
+ rest.MultipartJsonKey: `{"title": "title_test3", "testPayload": 123}`,
+ }, "files")
+ if err3 != nil {
+ t.Fatal(err3)
+ }
+
scenarios := []tests.ApiScenario{
{
Name: "missing collection",
@@ -1237,6 +1253,60 @@ func TestRecordCrudCreate(t *testing.T) {
"OnModelAfterCreate": 1,
},
},
+ {
+ Name: "submit via multipart form data with @jsonPayload key and unsatisfied @request.data rule",
+ Method: http.MethodPost,
+ Url: "/api/collections/demo3/records",
+ Body: formData2,
+ RequestHeaders: map[string]string{
+ "Content-Type": mp2.FormDataContentType(),
+ },
+ BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ collection, err := app.Dao().FindCollectionByNameOrId("demo3")
+ if err != nil {
+ t.Fatalf("failed to find demo3 collection: %v", err)
+ }
+ collection.CreateRule = types.Pointer("@request.data.testPayload != 123")
+ if err := app.Dao().WithoutHooks().SaveCollection(collection); err != nil {
+ t.Fatalf("failed to update demo3 collection create rule: %v", err)
+ }
+ core.ReloadCachedCollections(app)
+ },
+ ExpectedStatus: 400,
+ ExpectedContent: []string{`"data":{}`},
+ },
+ {
+ Name: "submit via multipart form data with @jsonPayload key and satisfied @request.data rule",
+ Method: http.MethodPost,
+ Url: "/api/collections/demo3/records",
+ Body: formData3,
+ RequestHeaders: map[string]string{
+ "Content-Type": mp3.FormDataContentType(),
+ },
+ BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ collection, err := app.Dao().FindCollectionByNameOrId("demo3")
+ if err != nil {
+ t.Fatalf("failed to find demo3 collection: %v", err)
+ }
+ collection.CreateRule = types.Pointer("@request.data.testPayload = 123")
+ if err := app.Dao().WithoutHooks().SaveCollection(collection); err != nil {
+ t.Fatalf("failed to update demo3 collection create rule: %v", err)
+ }
+ core.ReloadCachedCollections(app)
+ },
+ ExpectedStatus: 200,
+ ExpectedContent: []string{
+ `"id":"`,
+ `"title":"title_test3"`,
+ `"files":["`,
+ },
+ ExpectedEvents: map[string]int{
+ "OnRecordBeforeCreateRequest": 1,
+ "OnRecordAfterCreateRequest": 1,
+ "OnModelBeforeCreate": 1,
+ "OnModelAfterCreate": 1,
+ },
+ },
{
Name: "unique field error check",
Method: http.MethodPost,
@@ -1608,6 +1678,20 @@ func TestRecordCrudUpdate(t *testing.T) {
t.Fatal(err)
}
+ formData2, mp2, err2 := tests.MockMultipartData(map[string]string{
+ rest.MultipartJsonKey: `{"title": "title_test2", "testPayload": 123}`,
+ }, "files")
+ if err2 != nil {
+ t.Fatal(err2)
+ }
+
+ formData3, mp3, err3 := tests.MockMultipartData(map[string]string{
+ rest.MultipartJsonKey: `{"title": "title_test3", "testPayload": 123}`,
+ }, "files")
+ if err3 != nil {
+ t.Fatal(err3)
+ }
+
scenarios := []tests.ApiScenario{
{
Name: "missing collection",
@@ -1830,6 +1914,60 @@ func TestRecordCrudUpdate(t *testing.T) {
"OnModelAfterUpdate": 1,
},
},
+ {
+ Name: "submit via multipart form data with @jsonPayload key and unsatisfied @request.data rule",
+ Method: http.MethodPatch,
+ Url: "/api/collections/demo3/records/mk5fmymtx4wsprk",
+ Body: formData2,
+ RequestHeaders: map[string]string{
+ "Content-Type": mp2.FormDataContentType(),
+ },
+ BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ collection, err := app.Dao().FindCollectionByNameOrId("demo3")
+ if err != nil {
+ t.Fatalf("failed to find demo3 collection: %v", err)
+ }
+ collection.UpdateRule = types.Pointer("@request.data.testPayload != 123")
+ if err := app.Dao().WithoutHooks().SaveCollection(collection); err != nil {
+ t.Fatalf("failed to update demo3 collection update rule: %v", err)
+ }
+ core.ReloadCachedCollections(app)
+ },
+ ExpectedStatus: 404,
+ ExpectedContent: []string{`"data":{}`},
+ },
+ {
+ Name: "submit via multipart form data with @jsonPayload key and satisfied @request.data rule",
+ Method: http.MethodPatch,
+ Url: "/api/collections/demo3/records/mk5fmymtx4wsprk",
+ Body: formData3,
+ RequestHeaders: map[string]string{
+ "Content-Type": mp3.FormDataContentType(),
+ },
+ BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
+ collection, err := app.Dao().FindCollectionByNameOrId("demo3")
+ if err != nil {
+ t.Fatalf("failed to find demo3 collection: %v", err)
+ }
+ collection.UpdateRule = types.Pointer("@request.data.testPayload = 123")
+ if err := app.Dao().WithoutHooks().SaveCollection(collection); err != nil {
+ t.Fatalf("failed to update demo3 collection update rule: %v", err)
+ }
+ core.ReloadCachedCollections(app)
+ },
+ ExpectedStatus: 200,
+ ExpectedContent: []string{
+ `"id":"mk5fmymtx4wsprk"`,
+ `"title":"title_test3"`,
+ `"files":["`,
+ },
+ ExpectedEvents: map[string]int{
+ "OnRecordBeforeUpdateRequest": 1,
+ "OnRecordAfterUpdateRequest": 1,
+ "OnModelBeforeUpdate": 1,
+ "OnModelAfterUpdate": 1,
+ },
+ },
{
Name: "OnRecordAfterUpdateRequest error response",
Method: http.MethodPatch,
diff --git a/apis/record_helpers.go b/apis/record_helpers.go
index 6097050ba..470f9b196 100644
--- a/apis/record_helpers.go
+++ b/apis/record_helpers.go
@@ -44,6 +44,7 @@ func RequestInfo(c echo.Context) *models.RequestInfo {
}
result := &models.RequestInfo{
+ Context: models.RequestInfoContextDefault,
Method: c.Request().Method,
Query: map[string]any{},
Data: map[string]any{},
@@ -78,7 +79,7 @@ func RecordAuthResponse(
finalizers ...func(token string) error,
) error {
if !authRecord.Verified() && authRecord.Collection().AuthOptions().OnlyVerified {
- return NewForbiddenError("Please verify your email first.", nil)
+ return NewForbiddenError("Please verify your account first.", nil)
}
token, tokenErr := tokens.NewRecordAuthToken(app, authRecord)
@@ -140,7 +141,7 @@ func RecordAuthResponse(
// EnrichRecord parses the request context and enrich the provided record:
// - expands relations (if defaultExpands and/or ?expand query param is set)
// - ensures that the emails of the auth record and its expanded auth relations
-// are visibe only for the current logged admin, record owner or record with manage access
+// are visible only for the current logged admin, record owner or record with manage access
func EnrichRecord(c echo.Context, dao *daos.Dao, record *models.Record, defaultExpands ...string) error {
return EnrichRecords(c, dao, []*models.Record{record}, defaultExpands...)
}
@@ -148,12 +149,12 @@ func EnrichRecord(c echo.Context, dao *daos.Dao, record *models.Record, defaultE
// EnrichRecords parses the request context and enriches the provided records:
// - expands relations (if defaultExpands and/or ?expand query param is set)
// - ensures that the emails of the auth records and their expanded auth relations
-// are visibe only for the current logged admin, record owner or record with manage access
+// are visible only for the current logged admin, record owner or record with manage access
func EnrichRecords(c echo.Context, dao *daos.Dao, records []*models.Record, defaultExpands ...string) error {
requestInfo := RequestInfo(c)
if err := autoIgnoreAuthRecordsEmailVisibility(dao, records, requestInfo); err != nil {
- return fmt.Errorf("Failed to resolve email visibility: %w", err)
+ return fmt.Errorf("failed to resolve email visibility: %w", err)
}
expands := defaultExpands
@@ -166,7 +167,7 @@ func EnrichRecords(c echo.Context, dao *daos.Dao, records []*models.Record, defa
errs := dao.ExpandRecords(records, expands, expandFetch(dao, requestInfo))
if len(errs) > 0 {
- return fmt.Errorf("Failed to expand: %v", errs)
+ return fmt.Errorf("failed to expand: %v", errs)
}
return nil
@@ -184,7 +185,7 @@ func expandFetch(
}
if relCollection.ViewRule == nil {
- return fmt.Errorf("Only admins can view collection %q records", relCollection.Name)
+ return fmt.Errorf("only admins can view collection %q records", relCollection.Name)
}
if *relCollection.ViewRule != "" {
diff --git a/apis/record_helpers_test.go b/apis/record_helpers_test.go
index 8ddc50d90..e73f1abff 100644
--- a/apis/record_helpers_test.go
+++ b/apis/record_helpers_test.go
@@ -87,7 +87,7 @@ func TestRecordAuthResponse(t *testing.T) {
t.Fatal(err)
}
- unverfiedAuthRecord, err := app.Dao().FindRecordById("clients", "o1y0dd0spd786md")
+ unverifiedAuthRecord, err := app.Dao().FindRecordById("clients", "o1y0dd0spd786md")
if err != nil {
t.Fatal(err)
}
@@ -108,7 +108,7 @@ func TestRecordAuthResponse(t *testing.T) {
},
{
name: "valid auth record but with unverified email in onlyVerified collection",
- record: unverfiedAuthRecord,
+ record: unverifiedAuthRecord,
expectError: true,
},
{
diff --git a/apis/settings_test.go b/apis/settings_test.go
index 4d8fc9925..ce23a72e6 100644
--- a/apis/settings_test.go
+++ b/apis/settings_test.go
@@ -84,6 +84,8 @@ func TestSettingsList(t *testing.T) {
`"yandexAuth":{`,
`"patreonAuth":{`,
`"mailcowAuth":{`,
+ `"bitbucketAuth":{`,
+ `"planningcenterAuth":{`,
`"secret":"******"`,
`"clientSecret":"******"`,
},
@@ -169,6 +171,8 @@ func TestSettingsSet(t *testing.T) {
`"yandexAuth":{`,
`"patreonAuth":{`,
`"mailcowAuth":{`,
+ `"bitbucketAuth":{`,
+ `"planningcenterAuth":{`,
`"secret":"******"`,
`"clientSecret":"******"`,
`"appName":"acme_test"`,
@@ -241,6 +245,8 @@ func TestSettingsSet(t *testing.T) {
`"yandexAuth":{`,
`"patreonAuth":{`,
`"mailcowAuth":{`,
+ `"bitbucketAuth":{`,
+ `"planningcenterAuth":{`,
`"secret":"******"`,
`"clientSecret":"******"`,
`"appName":"update_test"`,
diff --git a/cmd/admin.go b/cmd/admin.go
index 4c53ebb6e..780eef897 100644
--- a/cmd/admin.go
+++ b/cmd/admin.go
@@ -28,12 +28,10 @@ func NewAdminCommand(app core.App) *cobra.Command {
func adminCreateCommand(app core.App) *cobra.Command {
command := &cobra.Command{
- Use: "create",
- Example: "admin create test@example.com 1234567890",
- Short: "Creates a new admin account",
- // prevents printing the error log twice
- SilenceErrors: true,
- SilenceUsage: true,
+ Use: "create",
+ Example: "admin create test@example.com 1234567890",
+ Short: "Creates a new admin account",
+ SilenceUsage: true,
RunE: func(command *cobra.Command, args []string) error {
if len(args) != 2 {
return errors.New("Missing email and password arguments.")
@@ -51,6 +49,10 @@ func adminCreateCommand(app core.App) *cobra.Command {
admin.Email = args[0]
admin.SetPassword(args[1])
+ if !app.Dao().HasTable(admin.TableName()) {
+ return errors.New("Migration are not initialized yet. Please run 'migrate up' and try again.")
+ }
+
if err := app.Dao().SaveAdmin(admin); err != nil {
return fmt.Errorf("Failed to create new admin account: %v", err)
}
@@ -65,12 +67,10 @@ func adminCreateCommand(app core.App) *cobra.Command {
func adminUpdateCommand(app core.App) *cobra.Command {
command := &cobra.Command{
- Use: "update",
- Example: "admin update test@example.com 1234567890",
- Short: "Changes the password of a single admin account",
- // prevents printing the error log twice
- SilenceErrors: true,
- SilenceUsage: true,
+ Use: "update",
+ Example: "admin update test@example.com 1234567890",
+ Short: "Changes the password of a single admin account",
+ SilenceUsage: true,
RunE: func(command *cobra.Command, args []string) error {
if len(args) != 2 {
return errors.New("Missing email and password arguments.")
@@ -84,6 +84,10 @@ func adminUpdateCommand(app core.App) *cobra.Command {
return errors.New("The new password must be at least 8 chars long.")
}
+ if !app.Dao().HasTable((&models.Admin{}).TableName()) {
+ return errors.New("Migration are not initialized yet. Please run 'migrate up' and try again.")
+ }
+
admin, err := app.Dao().FindAdminByEmail(args[0])
if err != nil {
return fmt.Errorf("Admin with email %s doesn't exist.", args[0])
@@ -105,17 +109,19 @@ func adminUpdateCommand(app core.App) *cobra.Command {
func adminDeleteCommand(app core.App) *cobra.Command {
command := &cobra.Command{
- Use: "delete",
- Example: "admin delete test@example.com",
- Short: "Deletes an existing admin account",
- // prevents printing the error log twice
- SilenceErrors: true,
- SilenceUsage: true,
+ Use: "delete",
+ Example: "admin delete test@example.com",
+ Short: "Deletes an existing admin account",
+ SilenceUsage: true,
RunE: func(command *cobra.Command, args []string) error {
if len(args) == 0 || args[0] == "" || is.EmailFormat.Validate(args[0]) != nil {
return errors.New("Invalid or missing email address.")
}
+ if !app.Dao().HasTable((&models.Admin{}).TableName()) {
+ return errors.New("Migration are not initialized yet. Please run 'migrate up' and try again.")
+ }
+
admin, err := app.Dao().FindAdminByEmail(args[0])
if err != nil {
color.Yellow("Admin %s is already deleted.", args[0])
diff --git a/cmd/env.go b/cmd/env.go
new file mode 100644
index 000000000..4969c83ce
--- /dev/null
+++ b/cmd/env.go
@@ -0,0 +1,57 @@
+package cmd
+
+import (
+ "errors"
+ "os"
+ "path"
+ "sort"
+
+ "github.com/joho/godotenv"
+ "github.com/spf13/cobra"
+)
+
+// LoadEnv loads env variables (KEY=VAL) from the provided files and paths
+// (for a directory path it looks for a .env file inside it).
+//
+// Please note that the loaded values DO NOT OVERRIDE
+// the existing environment variables.
+func LoadEnv(pp ...string) error {
+ // preparse the input and try to figure out if .env should be appended
+ var checked = make([]string, 0, len(pp))
+ for _, p := range pp {
+ if s, err := os.Stat(p); err != nil {
+ return err
+ } else if s.IsDir() {
+ chk := path.Join(p, ".env")
+ if _, err = os.Stat(chk); err == nil {
+ // append the path only if the .env file exists
+ checked = append(checked, chk)
+ } else if !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ } else {
+ checked = append(checked, p)
+ }
+ }
+
+ if len(checked) == 0 {
+ return nil
+ }
+
+ return godotenv.Load(checked...)
+}
+
+// EnvCommand returns a command that prints all loaded env variables (sorted by key)
+func EnvCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "env",
+ Long: "Outputs list (sorted by key) of of all environmental variables. Can be used for diagnosis and debugging",
+ Run: func(cmd *cobra.Command, args []string) {
+ kv := os.Environ()
+ sort.Strings(kv)
+ for _, l := range kv {
+ cmd.Println(l)
+ }
+ },
+ }
+}
diff --git a/cmd/serve.go b/cmd/serve.go
index 37b463a02..2a699e5fe 100644
--- a/cmd/serve.go
+++ b/cmd/serve.go
@@ -1,7 +1,7 @@
package cmd
import (
- "log"
+ "errors"
"net/http"
"github.com/AlperRehaYAZGAN/postgresbase/apis"
@@ -17,10 +17,11 @@ func NewServeCommand(app core.App, showStartBanner bool) *cobra.Command {
var httpsAddr string
command := &cobra.Command{
- Use: "serve [domain(s)]",
- Args: cobra.ArbitraryArgs,
- Short: "Starts the web server (default to 127.0.0.1:8090 if no domain is specified)",
- Run: func(command *cobra.Command, args []string) {
+ Use: "serve [domain(s)]",
+ Args: cobra.ArbitraryArgs,
+ Short: "Starts the web server (default to 127.0.0.1:8090 if no domain is specified)",
+ SilenceUsage: true,
+ RunE: func(command *cobra.Command, args []string) error {
// set default listener addresses if at least one domain is specified
if len(args) > 0 {
if httpAddr == "" {
@@ -43,9 +44,11 @@ func NewServeCommand(app core.App, showStartBanner bool) *cobra.Command {
CertificateDomains: args,
})
- if err != http.ErrServerClosed {
- log.Fatalln(err)
+ if errors.Is(err, http.ErrServerClosed) {
+ return nil
}
+
+ return err
},
}
diff --git a/core/base.go b/core/base.go
index 52d230907..869b487ed 100644
--- a/core/base.go
+++ b/core/base.go
@@ -9,6 +9,7 @@ import (
"os"
"path/filepath"
"runtime"
+ "strings"
"syscall"
"time"
@@ -599,10 +600,10 @@ func (app *BaseApp) RefreshSettings() error {
return err
}
- // reload handler level (if initialized and not in dev mode)
- if !app.IsDev() && app.Logger() != nil {
+ // reload handler level (if initialized)
+ if app.Logger() != nil {
if h, ok := app.Logger().Handler().(*logger.BatchHandler); ok {
- h.SetLevel(slog.Level(app.settings.Logs.MinLevel))
+ h.SetLevel(app.getLoggerMinLevel())
}
}
@@ -1161,7 +1162,9 @@ func (app *BaseApp) registerDefaultHooks() {
// try to delete the storage files from deleted Collection, Records, etc. model
app.OnModelAfterDelete().Add(func(e *ModelEvent) error {
if m, ok := e.Model.(models.FilesManager); ok && m.BaseFilesPath() != "" {
- prefix := m.BaseFilesPath()
+ // ensure that there is a trailing slash so that the list iterator could start walking from the prefix
+ // (https://github.com/pocketbase/pocketbase/discussions/5246#discussioncomment-10128955)
+ prefix := strings.TrimRight(m.BaseFilesPath(), "/") + "/"
// run in the background for "optimistic" delete to avoid
// blocking the delete transaction
@@ -1182,27 +1185,38 @@ func (app *BaseApp) registerDefaultHooks() {
if err := app.initAutobackupHooks(); err != nil {
app.Logger().Error("Failed to init auto backup hooks", slog.String("error", err.Error()))
}
-}
-func (app *BaseApp) initLogger() error {
- duration := 3 * time.Second
- ticker := time.NewTicker(duration)
- done := make(chan bool)
+ registerCachedCollectionsAppHooks(app)
+}
- // Apply the min level only if it is not in develop
- // to allow printing the logs to the console.
- //
- // DB logs are still filtered but the checks for the min level are done
- // in the BatchOptions.BeforeAddFunc instead of the slog.Handler.Enabled() method.
+// getLoggerMinLevel returns the logger min level based on the
+// app configurations (dev mode, settings, etc.).
+//
+// If not in dev mode, it returns the level from the app settings.
+//
+// If the app is in dev mode it returns level -9999, allowing
+// practically all logs to be printed to the terminal.
+// In this case DB logs are still filtered but the checks for the min level are done
+// in the BatchOptions.BeforeAddFunc instead of the slog.Handler.Enabled() method.
+func (app *BaseApp) getLoggerMinLevel() slog.Level {
var minLevel slog.Level
+
if app.IsDev() {
minLevel = -9999
} else if app.Settings() != nil {
minLevel = slog.Level(app.Settings().Logs.MinLevel)
}
+ return minLevel
+}
+
+func (app *BaseApp) initLogger() error {
+ duration := 3 * time.Second
+ ticker := time.NewTicker(duration)
+ done := make(chan bool)
+
handler := logger.NewBatchHandler(logger.BatchOptions{
- Level: minLevel,
+ Level: app.getLoggerMinLevel(),
BatchSize: 200,
BeforeAddFunc: func(ctx context.Context, log *logger.Log) bool {
if app.IsDev() {
@@ -1246,14 +1260,14 @@ func (app *BaseApp) initLogger() error {
return nil
})
+ // @todo replace with cron so that it doesn't rely on the logs write
+ //
// delete old logs
// ---
- logsMaxDays := app.Settings().Logs.MaxDays
now := time.Now()
lastLogsDeletedAt := cast.ToTime(app.Store().Get("lastLogsDeletedAt"))
- daysDiff := now.Sub(lastLogsDeletedAt).Hours() * 24
- if daysDiff > float64(logsMaxDays) {
- deleteErr := app.LogsDao().DeleteOldLogs(now.AddDate(0, 0, -1*logsMaxDays))
+ if now.Sub(lastLogsDeletedAt).Hours() >= 6 {
+ deleteErr := app.LogsDao().DeleteOldLogs(now.AddDate(0, 0, -1*app.Settings().Logs.MaxDays))
if deleteErr == nil {
app.Store().Set("lastLogsDeletedAt", now)
} else {
@@ -1271,7 +1285,7 @@ func (app *BaseApp) initLogger() error {
for {
select {
case <-done:
- handler.WriteAll(ctx)
+ return
case <-ticker.C:
handler.WriteAll(ctx)
}
@@ -1281,8 +1295,13 @@ func (app *BaseApp) initLogger() error {
app.logger = slog.New(handler)
app.OnTerminate().PreAdd(func(e *TerminateEvent) error {
+ // write all remaining logs before ticker.Stop to avoid races with ResetBootstrap user calls
+ handler.WriteAll(context.Background())
+
ticker.Stop()
+
done <- true
+
return nil
})
diff --git a/core/base_backup.go b/core/base_backup.go
index be08fc2e5..97e476e58 100644
--- a/core/base_backup.go
+++ b/core/base_backup.go
@@ -248,6 +248,16 @@ func (app *BaseApp) initAutobackupHooks() error {
loadJob := func() {
c.Stop()
+ // make sure that app.Settings() is always up to date
+ //
+ // @todo remove with the refactoring as core.App and daos.Dao will be one.
+ if err := app.RefreshSettings(); err != nil {
+ app.Logger().Debug(
+ "[Backup cron] Failed to get the latest app settings",
+ slog.String("error", err.Error()),
+ )
+ }
+
rawSchedule := app.Settings().Backups.Cron
if rawSchedule == "" || !isServe || !app.IsBootstrapped() {
return
diff --git a/core/base_test.go b/core/base_test.go
index 7927dd742..2ff061629 100644
--- a/core/base_test.go
+++ b/core/base_test.go
@@ -1,9 +1,12 @@
package core
import (
+ "context"
+ "database/sql"
"fmt"
"log/slog"
"os"
+ "strings"
"testing"
"time"
@@ -15,6 +18,8 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/tools/logger"
"github.com/AlperRehaYAZGAN/postgresbase/tools/mailer"
"github.com/AlperRehaYAZGAN/postgresbase/tools/migrate"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/types"
+ "github.com/pocketbase/dbx"
)
func TestNewBaseApp(t *testing.T) {
@@ -281,7 +286,7 @@ func TestBaseAppLoggerWrites(t *testing.T) {
}
defer cleanup()
- threshold := 200
+ const logsThreshold = 200
totalLogs := func(app App, t *testing.T) int {
var total int
@@ -294,24 +299,22 @@ func TestBaseAppLoggerWrites(t *testing.T) {
return total
}
- // disabled logs retention
- {
+ t.Run("disabled logs retention", func(t *testing.T) {
app.Settings().Logs.MaxDays = 0
- for i := 0; i < threshold+1; i++ {
+ for i := 0; i < logsThreshold+1; i++ {
app.Logger().Error("test")
}
if total := totalLogs(app, t); total != 0 {
t.Fatalf("Expected no logs, got %d", total)
}
- }
+ })
- // test batch logs writes
- {
+ t.Run("test batch logs writes", func(t *testing.T) {
app.Settings().Logs.MaxDays = 1
- for i := 0; i < threshold-1; i++ {
+ for i := 0; i < logsThreshold-1; i++ {
app.Logger().Error("test")
}
@@ -325,16 +328,102 @@ func TestBaseAppLoggerWrites(t *testing.T) {
// should be added for the next batch write
app.Logger().Error("test")
- if total := totalLogs(app, t); total != threshold {
- t.Fatalf("Expected %d logs, got %d", threshold, total)
+ if total := totalLogs(app, t); total != logsThreshold {
+ t.Fatalf("Expected %d logs, got %d", logsThreshold, total)
}
// wait for ~3 secs to check the timer trigger
time.Sleep(3200 * time.Millisecond)
- if total := totalLogs(app, t); total != threshold+1 {
- t.Fatalf("Expected %d logs, got %d", threshold+1, total)
+ if total := totalLogs(app, t); total != logsThreshold+1 {
+ t.Fatalf("Expected %d logs, got %d", logsThreshold+1, total)
}
- }
+ })
+
+ t.Run("test batch logs delete", func(t *testing.T) {
+ app.Settings().Logs.MaxDays = 2
+
+ deleteQueries := 0
+
+ // reset
+ app.Store().Set("lastLogsDeletedAt", time.Now())
+ if err := app.LogsDao().DeleteOldLogs(time.Now()); err != nil {
+ t.Fatal(err)
+ }
+
+ db := app.LogsDao().NonconcurrentDB().(*dbx.DB)
+ db.ExecLogFunc = func(ctx context.Context, t time.Duration, sql string, result sql.Result, err error) {
+ if strings.Contains(sql, "DELETE") {
+ deleteQueries++
+ }
+ }
+
+ // trigger batch write (A)
+ expectedLogs := logsThreshold
+ for i := 0; i < expectedLogs; i++ {
+ app.Logger().Error("testA")
+ }
+
+ if total := totalLogs(app, t); total != expectedLogs {
+ t.Fatalf("[batch write A] Expected %d logs, got %d", expectedLogs, total)
+ }
+
+ // mark the A inserted logs as 2-day expired
+ aExpiredDate, err := types.ParseDateTime(time.Now().AddDate(0, 0, -2))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = app.LogsDao().NonconcurrentDB().NewQuery("UPDATE _logs SET created={:date}, updated={:date}").Bind(dbx.Params{
+ "date": aExpiredDate.String(),
+ }).Execute()
+ if err != nil {
+ t.Fatalf("Failed to mock logs timestamp fields: %v", err)
+ }
+
+ // simulate recently deleted logs
+ app.Store().Set("lastLogsDeletedAt", time.Now().Add(-5*time.Hour))
+
+ // trigger batch write (B)
+ for i := 0; i < logsThreshold; i++ {
+ app.Logger().Error("testB")
+ }
+
+ expectedLogs = 2 * logsThreshold
+
+ // note: even though there are expired logs it shouldn't perform the delete operation because of the lastLogsDeletedAt time
+ if total := totalLogs(app, t); total != expectedLogs {
+ t.Fatalf("[batch write B] Expected %d logs, got %d", expectedLogs, total)
+ }
+
+ // mark the B inserted logs as 1-day expired to ensure that they will not be deleted
+ bExpiredDate, err := types.ParseDateTime(time.Now().AddDate(0, 0, -1))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = app.LogsDao().NonconcurrentDB().NewQuery("UPDATE _logs SET created={:date}, updated={:date} where message='testB'").Bind(dbx.Params{
+ "date": bExpiredDate.String(),
+ }).Execute()
+ if err != nil {
+ t.Fatalf("Failed to mock logs timestamp fields: %v", err)
+ }
+
+ // should trigger delete on the next batch write
+ app.Store().Set("lastLogsDeletedAt", time.Now().Add(-6*time.Hour))
+
+ // trigger batch write (C)
+ for i := 0; i < logsThreshold; i++ {
+ app.Logger().Error("testC")
+ }
+
+ expectedLogs = 2 * logsThreshold // only B and C logs should remain
+
+ if total := totalLogs(app, t); total != expectedLogs {
+ t.Fatalf("[batch write C] Expected %d logs, got %d", expectedLogs, total)
+ }
+
+ if deleteQueries != 1 {
+ t.Fatalf("Expected DeleteOldLogs to be called %d, got %d", 1, deleteQueries)
+ }
+ })
}
func TestBaseAppRefreshSettingsLoggerMinLevelEnabled(t *testing.T) {
diff --git a/core/collections_cache.go b/core/collections_cache.go
new file mode 100644
index 000000000..7f30b302b
--- /dev/null
+++ b/core/collections_cache.go
@@ -0,0 +1,72 @@
+package core
+
+// -------------------------------------------------------------------
+// This is a small optimization ported from the [ongoing refactoring branch](https://github.com/pocketbase/pocketbase/discussions/4355).
+//
+// @todo remove after the refactoring is finalized.
+// -------------------------------------------------------------------
+
+import (
+ "strings"
+
+ "github.com/AlperRehaYAZGAN/postgresbase/models"
+)
+
+const storeCachedCollectionsKey = "@cachedCollectionsContext"
+
+func registerCachedCollectionsAppHooks(app App) {
+ collectionsChangeFunc := func(e *ModelEvent) error {
+ if _, ok := e.Model.(*models.Collection); !ok {
+ return nil
+ }
+
+ _ = ReloadCachedCollections(app)
+
+ return nil
+ }
+ app.OnModelAfterCreate().Add(collectionsChangeFunc)
+ app.OnModelAfterUpdate().Add(collectionsChangeFunc)
+ app.OnModelAfterDelete().Add(collectionsChangeFunc)
+ app.OnBeforeServe().Add(func(e *ServeEvent) error {
+ _ = ReloadCachedCollections(e.App)
+ return nil
+ })
+}
+
+func ReloadCachedCollections(app App) error {
+ collections := []*models.Collection{}
+
+ err := app.Dao().CollectionQuery().All(&collections)
+ if err != nil {
+ return err
+ }
+
+ app.Store().Set(storeCachedCollectionsKey, collections)
+
+ return nil
+}
+
+func FindCachedCollectionByNameOrId(app App, nameOrId string) (*models.Collection, error) {
+ // retrieve from the app cache
+ // ---
+ collections, _ := app.Store().Get(storeCachedCollectionsKey).([]*models.Collection)
+ for _, c := range collections {
+ if strings.EqualFold(c.Name, nameOrId) || c.Id == nameOrId {
+ return c, nil
+ }
+ }
+
+ // retrieve from the database
+ // ---
+ found, err := app.Dao().FindCollectionByNameOrId(nameOrId)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ReloadCachedCollections(app)
+ if err != nil {
+ app.Logger().Warn("Failed to reload collections cache", "error", err)
+ }
+
+ return found, nil
+}
diff --git a/daos/admin.go b/daos/admin.go
index 508531bd6..4ce8acb17 100644
--- a/daos/admin.go
+++ b/daos/admin.go
@@ -59,7 +59,7 @@ func (dao *Dao) FindAdminByToken(token string, baseTokenKey string) (*models.Adm
// check required claims
id, _ := unverifiedClaims["id"].(string)
if id == "" {
- return nil, errors.New("Missing or invalid token claims.")
+ return nil, errors.New("missing or invalid token claims")
}
admin, err := dao.FindAdminById(id)
@@ -116,7 +116,7 @@ func (dao *Dao) DeleteAdmin(admin *models.Admin) error {
}
if total == 1 {
- return errors.New("You cannot delete the only existing admin.")
+ return errors.New("you cannot delete the only existing admin")
}
return dao.Delete(admin)
diff --git a/daos/base.go b/daos/base.go
index 242728708..a749284d0 100644
--- a/daos/base.go
+++ b/daos/base.go
@@ -18,7 +18,7 @@ func New(db dbx.Builder) *Dao {
return NewMultiDB(db, db)
}
-// New creates a new Dao instance with the provided dedicated
+// NewMultiDB creates a new Dao instance with the provided dedicated
// async and sync db builders.
func NewMultiDB(concurrentDB, nonconcurrentDB dbx.Builder) *Dao {
return &Dao{
@@ -87,16 +87,16 @@ func (dao *Dao) Clone() *Dao {
// WithoutHooks returns a new Dao with the same configuration options
// as the current one, but without create/update/delete hooks.
func (dao *Dao) WithoutHooks() *Dao {
- new := dao.Clone()
+ clone := dao.Clone()
- new.BeforeCreateFunc = nil
- new.AfterCreateFunc = nil
- new.BeforeUpdateFunc = nil
- new.AfterUpdateFunc = nil
- new.BeforeDeleteFunc = nil
- new.AfterDeleteFunc = nil
+ clone.BeforeCreateFunc = nil
+ clone.AfterCreateFunc = nil
+ clone.BeforeUpdateFunc = nil
+ clone.AfterUpdateFunc = nil
+ clone.BeforeDeleteFunc = nil
+ clone.AfterDeleteFunc = nil
- return new
+ return clone
}
// ModelQuery creates a new preconfigured select query with preset
@@ -119,9 +119,9 @@ func (dao *Dao) FindById(m models.Model, id string) error {
}
type afterCallGroup struct {
- Action string
- EventDao *Dao
Model models.Model
+ EventDao *Dao
+ Action string
}
// RunInTransaction wraps fn into a transaction.
@@ -169,19 +169,19 @@ func (dao *Dao) RunInTransaction(fn func(txDao *Dao) error) error {
if dao.AfterCreateFunc != nil {
txDao.AfterCreateFunc = func(eventDao *Dao, m models.Model) error {
- afterCalls = append(afterCalls, afterCallGroup{"create", eventDao, m})
+ afterCalls = append(afterCalls, afterCallGroup{m, eventDao, "create"})
return nil
}
}
if dao.AfterUpdateFunc != nil {
txDao.AfterUpdateFunc = func(eventDao *Dao, m models.Model) error {
- afterCalls = append(afterCalls, afterCallGroup{"update", eventDao, m})
+ afterCalls = append(afterCalls, afterCallGroup{m, eventDao, "update"})
return nil
}
}
if dao.AfterDeleteFunc != nil {
txDao.AfterDeleteFunc = func(eventDao *Dao, m models.Model) error {
- afterCalls = append(afterCalls, afterCallGroup{"delete", eventDao, m})
+ afterCalls = append(afterCalls, afterCallGroup{m, eventDao, "delete"})
return nil
}
}
diff --git a/daos/collection.go b/daos/collection.go
index bc0760dbc..6e7f0fb7d 100644
--- a/daos/collection.go
+++ b/daos/collection.go
@@ -121,7 +121,7 @@ func (dao *Dao) FindCollectionReferences(collection *models.Collection, excludeI
// - is referenced as part of a relation field in another collection
func (dao *Dao) DeleteCollection(collection *models.Collection) error {
if collection.System {
- return fmt.Errorf("System collection %q cannot be deleted.", collection.Name)
+ return fmt.Errorf("system collection %q cannot be deleted", collection.Name)
}
// ensure that there aren't any existing references.
@@ -135,7 +135,7 @@ func (dao *Dao) DeleteCollection(collection *models.Collection) error {
for ref := range result {
names = append(names, ref.Name)
}
- return fmt.Errorf("The collection %q has external relation field references (%s).", collection.Name, strings.Join(names, ", "))
+ return fmt.Errorf("the collection %q has external relation field references (%s)", collection.Name, strings.Join(names, ", "))
}
return dao.RunInTransaction(func(txDao *Dao) error {
@@ -152,7 +152,7 @@ func (dao *Dao) DeleteCollection(collection *models.Collection) error {
// trigger views resave to check for dependencies
if err := txDao.resaveViewsWithChangedSchema(collection.Id); err != nil {
- return fmt.Errorf("The collection has a view dependency - %w", err)
+ return fmt.Errorf("the collection has a view dependency - %w", err)
}
return txDao.Delete(collection)
@@ -162,8 +162,8 @@ func (dao *Dao) DeleteCollection(collection *models.Collection) error {
// SaveCollection persists the provided Collection model and updates
// its related records table schema.
//
-// If collecction.IsNew() is true, the method will perform a create, otherwise an update.
-// To explicitly mark a collection for update you can use collecction.MarkAsNotNew().
+// If collection.IsNew() is true, the method will perform a create, otherwise an update.
+// To explicitly mark a collection for update you can use collection.MarkAsNotNew().
func (dao *Dao) SaveCollection(collection *models.Collection) error {
var oldCollection *models.Collection
@@ -227,7 +227,7 @@ func (dao *Dao) ImportCollections(
afterSync func(txDao *Dao, mappedImported, mappedExisting map[string]*models.Collection) error,
) error {
if len(importedCollections) == 0 {
- return errors.New("No collections to import")
+ return errors.New("no collections to import")
}
return dao.RunInTransaction(func(txDao *Dao) error {
@@ -263,11 +263,11 @@ func (dao *Dao) ImportCollections(
// extend existing schema
if !deleteMissing {
- schema, _ := existing.Schema.Clone()
+ schemaClone, _ := existing.Schema.Clone()
for _, f := range imported.Schema.Fields() {
- schema.AddField(f) // add or replace
+ schemaClone.AddField(f) // add or replace
}
- imported.Schema = *schema
+ imported.Schema = *schemaClone
}
} else {
imported.MarkAsNew()
@@ -285,7 +285,7 @@ func (dao *Dao) ImportCollections(
}
if existing.System {
- return fmt.Errorf("System collection %q cannot be deleted.", existing.Name)
+ return fmt.Errorf("system collection %q cannot be deleted", existing.Name)
}
// delete the related records table or view
diff --git a/daos/collection_test.go b/daos/collection_test.go
index cef209bec..c0947e605 100644
--- a/daos/collection_test.go
+++ b/daos/collection_test.go
@@ -153,9 +153,11 @@ func TestFindCollectionReferences(t *testing.T) {
"rel_one_no_cascade",
"rel_one_no_cascade_required",
"rel_one_cascade",
+ "rel_one_unique",
"rel_many_no_cascade",
"rel_many_no_cascade_required",
"rel_many_cascade",
+ "rel_many_unique",
}
for col, fields := range result {
@@ -756,7 +758,7 @@ func TestImportCollections(t *testing.T) {
"demo1": 15,
"demo2": 2,
"demo3": 2,
- "demo4": 11,
+ "demo4": 13,
"demo5": 6,
"new_import": 1,
}
@@ -774,37 +776,38 @@ func TestImportCollections(t *testing.T) {
},
}
- for _, scenario := range scenarios {
- testApp, _ := tests.NewTestApp()
- defer testApp.Cleanup()
+ for _, s := range scenarios {
+ t.Run(s.name, func(t *testing.T) {
+ testApp, _ := tests.NewTestApp()
+ defer testApp.Cleanup()
- importedCollections := []*models.Collection{}
+ importedCollections := []*models.Collection{}
- // load data
- loadErr := json.Unmarshal([]byte(scenario.jsonData), &importedCollections)
- if loadErr != nil {
- t.Fatalf("[%s] Failed to load data: %v", scenario.name, loadErr)
- continue
- }
+ // load data
+ loadErr := json.Unmarshal([]byte(s.jsonData), &importedCollections)
+ if loadErr != nil {
+ t.Fatalf("Failed to load data: %v", loadErr)
+ }
- err := testApp.Dao().ImportCollections(importedCollections, scenario.deleteMissing, scenario.beforeRecordsSync)
+ err := testApp.Dao().ImportCollections(importedCollections, s.deleteMissing, s.beforeRecordsSync)
- hasErr := err != nil
- if hasErr != scenario.expectError {
- t.Errorf("[%s] Expected hasErr to be %v, got %v (%v)", scenario.name, scenario.expectError, hasErr, err)
- }
+ hasErr := err != nil
+ if hasErr != s.expectError {
+ t.Fatalf("Expected hasErr to be %v, got %v (%v)", s.expectError, hasErr, err)
+ }
- // check collections count
- collections := []*models.Collection{}
- if err := testApp.Dao().CollectionQuery().All(&collections); err != nil {
- t.Fatal(err)
- }
- if len(collections) != scenario.expectCollectionsCount {
- t.Errorf("[%s] Expected %d collections, got %d", scenario.name, scenario.expectCollectionsCount, len(collections))
- }
+ // check collections count
+ collections := []*models.Collection{}
+ if err := testApp.Dao().CollectionQuery().All(&collections); err != nil {
+ t.Fatal(err)
+ }
+ if len(collections) != s.expectCollectionsCount {
+ t.Fatalf("Expected %d collections, got %d", s.expectCollectionsCount, len(collections))
+ }
- if scenario.afterTestFunc != nil {
- scenario.afterTestFunc(testApp, collections)
- }
+ if s.afterTestFunc != nil {
+ s.afterTestFunc(testApp, collections)
+ }
+ })
}
}
diff --git a/daos/log.go b/daos/log.go
index 593458ea3..755d735b5 100644
--- a/daos/log.go
+++ b/daos/log.go
@@ -39,6 +39,7 @@ func (dao *Dao) LogsStats(expr dbx.Expression) ([]*LogsStatsItem, error) {
result := []*LogsStatsItem{}
query := dao.LogQuery().
+ // !CHANGED: to support the Postgres standard date format
Select("count(id) as total", "created as date").
GroupBy("date")
diff --git a/daos/record.go b/daos/record.go
index 328b01f37..f9286e4ee 100644
--- a/daos/record.go
+++ b/daos/record.go
@@ -40,16 +40,16 @@ func (dao *Dao) RecordQuery(collectionModelOrIdentifier any) *dbx.SelectQuery {
collection, collectionErr = dao.FindCollectionByNameOrId(c)
if collection != nil {
tableName = collection.Name
- } else {
- // update with some fake table name for easier debugging
- tableName = "@@__missing_" + c
}
default:
- // update with some fake table name for easier debugging
- tableName = "@@__invalidCollectionModelOrIdentifier"
collectionErr = errors.New("unsupported collection identifier, must be collection model, id or name")
}
+ // update with some fake table name for easier debugging
+ if tableName == "" {
+ tableName = "@@__invalidCollectionModelOrIdentifier"
+ }
+
selectCols := fmt.Sprintf("%s.*", dao.DB().QuoteSimpleColumnName(tableName))
query := dao.DB().Select(selectCols).From(tableName)
@@ -198,8 +198,6 @@ func (dao *Dao) FindRecordsByIds(
return records, nil
}
-// @todo consider to depricate as it may be easier to just use dao.RecordQuery()
-//
// FindRecordsByExpr finds all records by the specified db expression.
//
// Returns all collection records if no expressions are provided.
@@ -432,7 +430,7 @@ func (dao *Dao) FindAuthRecordByToken(token string, baseTokenKey string) (*model
}
if !record.Collection().IsAuth() {
- return nil, errors.New("The token is not associated to an auth collection record.")
+ return nil, errors.New("the token is not associated to an auth collection record")
}
verificationKey := record.TokenKey() + baseTokenKey
@@ -659,8 +657,6 @@ func (dao *Dao) DeleteRecord(record *models.Record) error {
//
// NB! This method is expected to be called inside a transaction.
func (dao *Dao) cascadeRecordDelete(mainRecord *models.Record, refs map[*models.Collection][]*schema.SchemaField) error {
- uniqueJsonEachAlias := "__je__" + security.PseudorandomString(4)
-
// @todo consider changing refs to a slice
//
// Sort the refs keys to ensure that the cascade events firing order is always the same.
@@ -684,15 +680,17 @@ func (dao *Dao) cascadeRecordDelete(mainRecord *models.Record, refs map[*models.
recordTableName := inflector.Columnify(refCollection.Name)
prefixedFieldName := recordTableName + "." + inflector.Columnify(field.Name)
- query := dao.RecordQuery(refCollection).Distinct(true)
+ query := dao.RecordQuery(refCollection)
if opt, ok := field.Options.(schema.MultiValuer); !ok || !opt.IsMultiple() {
query.AndWhere(dbx.HashExp{prefixedFieldName: mainRecord.Id})
} else {
- query.InnerJoin(fmt.Sprintf(
- `json_each(CASE WHEN json_valid([[%s]]) THEN [[%s]] ELSE json_array([[%s]]) END) as {{%s}}`,
- prefixedFieldName, prefixedFieldName, prefixedFieldName, uniqueJsonEachAlias,
- ), dbx.HashExp{uniqueJsonEachAlias + ".value": mainRecord.Id})
+ query.AndWhere(dbx.Exists(dbx.NewExp(fmt.Sprintf(
+ `SELECT 1 FROM json_each(CASE WHEN json_valid([[%s]]) THEN [[%s]] ELSE json_array([[%s]]) END) {{__je__}} WHERE [[__je__.value]]={:jevalue}`,
+ prefixedFieldName, prefixedFieldName, prefixedFieldName,
+ ), dbx.Params{
+ "jevalue": mainRecord.Id,
+ })))
}
if refCollection.Id == mainRecord.Collection().Id {
diff --git a/daos/record_expand.go b/daos/record_expand.go
index 4a503824b..7455de96f 100644
--- a/daos/record_expand.go
+++ b/daos/record_expand.go
@@ -1,14 +1,15 @@
package daos
import (
+ "errors"
"fmt"
+ "log"
"regexp"
"strings"
"github.com/AlperRehaYAZGAN/postgresbase/models"
"github.com/AlperRehaYAZGAN/postgresbase/models/schema"
"github.com/AlperRehaYAZGAN/postgresbase/tools/dbutils"
- "github.com/AlperRehaYAZGAN/postgresbase/tools/inflector"
"github.com/AlperRehaYAZGAN/postgresbase/tools/list"
"github.com/AlperRehaYAZGAN/postgresbase/tools/security"
"github.com/AlperRehaYAZGAN/postgresbase/tools/types"
@@ -16,6 +17,8 @@ import (
)
// MaxExpandDepth specifies the max allowed nested expand depth path.
+//
+// @todo Consider eventually reusing resolvers.maxNestedRels
const MaxExpandDepth = 6
// ExpandFetchFunc defines the function that is used to fetch the expanded relation records.
@@ -51,13 +54,15 @@ func (dao *Dao) ExpandRecords(records []*models.Record, expands []string, optFet
return failed
}
-var indirectExpandRegex = regexp.MustCompile(`^(\w+)\((\w+)\)$`)
+// Deprecated
+var indirectExpandRegexOld = regexp.MustCompile(`^(\w+)\((\w+)\)$`)
+
+var indirectExpandRegex = regexp.MustCompile(`^(\w+)_via_(\w+)$`)
// notes:
// - if fetchFunc is nil, dao.FindRecordsByIds will be used
// - all records are expected to be from the same collection
// - if MaxExpandDepth is reached, the function returns nil ignoring the remaining expand path
-// - indirect expands are supported only with single relation fields
func (dao *Dao) expandRecords(records []*models.Record, expandPath string, fetchFunc ExpandFetchFunc, recursionLevel int) error {
if fetchFunc == nil {
// load a default fetchFunc
@@ -77,70 +82,87 @@ func (dao *Dao) expandRecords(records []*models.Record, expandPath string, fetch
var relCollection *models.Collection
parts := strings.SplitN(expandPath, ".", 2)
- matches := indirectExpandRegex.FindStringSubmatch(parts[0])
+ var matches []string
+
+ // @todo remove the old syntax support
+ if strings.Contains(parts[0], "(") {
+ matches = indirectExpandRegexOld.FindStringSubmatch(parts[0])
+ if len(matches) == 3 {
+ log.Printf(
+ "%s expand format is deprecated and will be removed in the future. Consider replacing it with %s_via_%s.\n",
+ matches[0],
+ matches[1],
+ matches[2],
+ )
+ }
+ } else {
+ matches = indirectExpandRegex.FindStringSubmatch(parts[0])
+ }
if len(matches) == 3 {
indirectRel, _ := dao.FindCollectionByNameOrId(matches[1])
if indirectRel == nil {
- return fmt.Errorf("Couldn't find indirect related collection %q.", matches[1])
+ return fmt.Errorf("couldn't find back-related collection %q", matches[1])
}
indirectRelField := indirectRel.Schema.GetFieldByName(matches[2])
if indirectRelField == nil || indirectRelField.Type != schema.FieldTypeRelation {
- return fmt.Errorf("Couldn't find indirect relation field %q in collection %q.", matches[2], mainCollection.Name)
+ return fmt.Errorf("couldn't find back-relation field %q in collection %q", matches[2], indirectRel.Name)
}
indirectRelField.InitOptions()
indirectRelFieldOptions, _ := indirectRelField.Options.(*schema.RelationOptions)
if indirectRelFieldOptions == nil || indirectRelFieldOptions.CollectionId != mainCollection.Id {
- return fmt.Errorf("Invalid indirect relation field path %q.", parts[0])
- }
- if indirectRelFieldOptions.IsMultiple() {
- // for now don't allow multi-relation indirect fields expand
- // due to eventual poor query performance with large data sets.
- return fmt.Errorf("Multi-relation fields cannot be indirectly expanded in %q.", parts[0])
- }
-
- recordIds := make([]any, len(records))
- for i, record := range records {
- recordIds[i] = record.Id
+ return fmt.Errorf("invalid back-relation field path %q", parts[0])
}
- // @todo after the index optimizations consider allowing
- // indirect expand for multi-relation fields
- indirectRecords, err := dao.FindRecordsByExpr(
- indirectRel.Id,
- dbx.In(inflector.Columnify(matches[2]), recordIds...),
- )
- if err != nil {
- return err
- }
- mappedIndirectRecordIds := make(map[string][]string, len(indirectRecords))
- for _, indirectRecord := range indirectRecords {
- recId := indirectRecord.GetString(matches[2])
- if recId != "" {
- mappedIndirectRecordIds[recId] = append(mappedIndirectRecordIds[recId], indirectRecord.Id)
+ // add the related id(s) as a dynamic relation field value to
+ // allow further expand checks at a later stage in a more unified manner
+ prepErr := func() error {
+ q := dao.DB().Select("id").
+ From(indirectRel.Name).
+ Limit(1000) // the limit is arbitrarily chosen and may change in the future
+
+ if indirectRelFieldOptions.IsMultiple() {
+ q.AndWhere(dbx.Exists(dbx.NewExp(fmt.Sprintf(
+ "SELECT 1 FROM %s je WHERE je.value = {:id}",
+ dbutils.JsonEach(indirectRelField.Name),
+ ))))
+ } else {
+ q.AndWhere(dbx.NewExp("[[" + indirectRelField.Name + "]] = {:id}"))
}
- }
- // add the indirect relation ids as a new relation field value
- for _, record := range records {
- relIds, ok := mappedIndirectRecordIds[record.Id]
- if ok && len(relIds) > 0 {
- record.Set(parts[0], relIds)
+ pq := q.Build().Prepare()
+
+ for _, record := range records {
+ var relIds []string
+
+ err := pq.Bind(dbx.Params{"id": record.Id}).Column(&relIds)
+ if err != nil {
+ return errors.Join(err, pq.Close())
+ }
+
+ if len(relIds) > 0 {
+ record.Set(parts[0], relIds)
+ }
}
+
+ return pq.Close()
+ }()
+ if prepErr != nil {
+ return prepErr
}
relFieldOptions = &schema.RelationOptions{
MaxSelect: nil,
CollectionId: indirectRel.Id,
}
- if isRelFieldUnique(indirectRel, indirectRelField.Name) {
+ if dbutils.HasSingleColumnUniqueIndex(indirectRelField.Name, indirectRel.Indexes) {
relFieldOptions.MaxSelect = types.Pointer(1)
}
- // indirect relation
+ // indirect/back relation
relField = &schema.SchemaField{
- Id: "indirect_" + security.PseudorandomString(5),
+ Id: "_" + parts[0] + security.PseudorandomString(3),
Type: schema.FieldTypeRelation,
Name: parts[0],
Options: relFieldOptions,
diff --git a/daos/record_expand_test.go b/daos/record_expand_test.go
index 9289ab28e..43b478581 100644
--- a/daos/record_expand_test.go
+++ b/daos/record_expand_test.go
@@ -163,7 +163,7 @@ func TestExpandRecords(t *testing.T) {
0,
},
{
- "simple indirect expand",
+ "simple back single relation field expand (deprecated syntax)",
"demo3",
[]string{"lcl9d87w22ml6jy"},
[]string{"demo4(rel_one_no_cascade_required)"},
@@ -174,11 +174,22 @@ func TestExpandRecords(t *testing.T) {
0,
},
{
- "nested indirect expand",
+ "simple back expand via single relation field",
+ "demo3",
+ []string{"lcl9d87w22ml6jy"},
+ []string{"demo4_via_rel_one_no_cascade_required"},
+ func(c *models.Collection, ids []string) ([]*models.Record, error) {
+ return app.Dao().FindRecordsByIds(c.Id, ids, nil)
+ },
+ 1,
+ 0,
+ },
+ {
+ "nested back expand via single relation field",
"demo3",
[]string{"lcl9d87w22ml6jy"},
[]string{
- "demo4(rel_one_no_cascade_required).self_rel_many.self_rel_many.self_rel_one",
+ "demo4_via_rel_one_no_cascade_required.self_rel_many.self_rel_many.self_rel_one",
},
func(c *models.Collection, ids []string) ([]*models.Record, error) {
return app.Dao().FindRecordsByIds(c.Id, ids, nil)
@@ -186,6 +197,19 @@ func TestExpandRecords(t *testing.T) {
5,
0,
},
+ {
+ "nested back expand via multiple relation field",
+ "demo3",
+ []string{"lcl9d87w22ml6jy"},
+ []string{
+ "demo4_via_rel_many_no_cascade_required.self_rel_many.rel_many_no_cascade_required.demo4_via_rel_many_no_cascade_required",
+ },
+ func(c *models.Collection, ids []string) ([]*models.Record, error) {
+ return app.Dao().FindRecordsByIds(c.Id, ids, nil)
+ },
+ 7,
+ 0,
+ },
{
"expand multiple relations sharing a common path",
"demo4",
@@ -332,7 +356,7 @@ func TestExpandRecord(t *testing.T) {
0,
},
{
- "simple indirect expand",
+ "simple indirect expand via single relation field (deprecated syntax)",
"demo3",
"lcl9d87w22ml6jy",
[]string{"demo4(rel_one_no_cascade_required)"},
@@ -343,7 +367,18 @@ func TestExpandRecord(t *testing.T) {
0,
},
{
- "nested indirect expand",
+ "simple indirect expand via single relation field",
+ "demo3",
+ "lcl9d87w22ml6jy",
+ []string{"demo4_via_rel_one_no_cascade_required"},
+ func(c *models.Collection, ids []string) ([]*models.Record, error) {
+ return app.Dao().FindRecordsByIds(c.Id, ids, nil)
+ },
+ 1,
+ 0,
+ },
+ {
+ "nested indirect expand via single relation field",
"demo3",
"lcl9d87w22ml6jy",
[]string{
@@ -355,6 +390,19 @@ func TestExpandRecord(t *testing.T) {
5,
0,
},
+ {
+ "nested indirect expand via single relation field",
+ "demo3",
+ "lcl9d87w22ml6jy",
+ []string{
+ "demo4_via_rel_many_no_cascade_required.self_rel_many.rel_many_no_cascade_required.demo4_via_rel_many_no_cascade_required",
+ },
+ func(c *models.Collection, ids []string) ([]*models.Record, error) {
+ return app.Dao().FindRecordsByIds(c.Id, ids, nil)
+ },
+ 7,
+ 0,
+ },
}
for _, s := range scenarios {
@@ -388,21 +436,23 @@ func TestIndirectExpandSingeVsArrayResult(t *testing.T) {
// non-unique indirect expand
{
- errs := app.Dao().ExpandRecord(record, []string{"demo4(rel_one_cascade)"}, func(c *models.Collection, ids []string) ([]*models.Record, error) {
+ errs := app.Dao().ExpandRecord(record, []string{"demo4_via_rel_one_cascade"}, func(c *models.Collection, ids []string) ([]*models.Record, error) {
return app.Dao().FindRecordsByIds(c.Id, ids, nil)
})
if len(errs) > 0 {
t.Fatal(errs)
}
- result, ok := record.Expand()["demo4(rel_one_cascade)"].([]*models.Record)
+ result, ok := record.Expand()["demo4_via_rel_one_cascade"].([]*models.Record)
if !ok {
t.Fatalf("Expected the expanded result to be a slice, got %v", result)
}
}
- // mock a unique constraint for the rel_one_cascade field
+ // unique indirect expand
{
+ // mock a unique constraint for the rel_one_cascade field
+ // ---
demo4, err := app.Dao().FindCollectionByNameOrId("demo4")
if err != nil {
t.Fatal(err)
@@ -413,18 +463,16 @@ func TestIndirectExpandSingeVsArrayResult(t *testing.T) {
if err := app.Dao().SaveCollection(demo4); err != nil {
t.Fatalf("Failed to mock unique constraint: %v", err)
}
- }
+ // ---
- // non-unique indirect expand
- {
- errs := app.Dao().ExpandRecord(record, []string{"demo4(rel_one_cascade)"}, func(c *models.Collection, ids []string) ([]*models.Record, error) {
+ errs := app.Dao().ExpandRecord(record, []string{"demo4_via_rel_one_cascade"}, func(c *models.Collection, ids []string) ([]*models.Record, error) {
return app.Dao().FindRecordsByIds(c.Id, ids, nil)
})
if len(errs) > 0 {
t.Fatal(errs)
}
- result, ok := record.Expand()["demo4(rel_one_cascade)"].(*models.Record)
+ result, ok := record.Expand()["demo4_via_rel_one_cascade"].(*models.Record)
if !ok {
t.Fatalf("Expected the expanded result to be a single model, got %v", result)
}
diff --git a/daos/record_table_sync.go b/daos/record_table_sync.go
index 358721563..1ec03887c 100644
--- a/daos/record_table_sync.go
+++ b/daos/record_table_sync.go
@@ -39,6 +39,7 @@ func (dao *Dao) SyncRecordTableSchema(newCollection *models.Collection, oldColle
cols[schema.FieldNamePasswordHash] = "TEXT NOT NULL"
cols[schema.FieldNameLastResetSentAt] = "TEXT DEFAULT '' NOT NULL"
cols[schema.FieldNameLastVerificationSentAt] = "TEXT DEFAULT '' NOT NULL"
+ cols[schema.FieldNameLastLoginAlertSentAt] = "TEXT DEFAULT '' NOT NULL"
}
// ensure that the new collection has an id
@@ -212,15 +213,15 @@ func (dao *Dao) normalizeSingleVsMultipleFieldChanges(newCollection, oldCollecti
if !isOldMultiple && isNewMultiple {
// single -> multiple (convert to array)
copyQuery = txDao.DB().NewQuery(fmt.Sprintf(
- `UPDATE {{%s}} set [[%s]] = (
+ `UPDATE "%s" set "%s" = (
CASE
- WHEN COALESCE([[%s]], '') = ''
+ WHEN coalesce("%s"::text, '[]') = ''
THEN '[]'
ELSE (
CASE
- WHEN json_valid([[%s]]) AND json_type([[%s]]) == 'array'
- THEN [[%s]]
- ELSE json_array([[%s]])
+ WHEN json_valid("%s"::text) AND json_typeof("%s"::json) = 'array'
+ THEN "%s"::json
+ ELSE json_build_array("%s")
END
)
END
@@ -239,15 +240,15 @@ func (dao *Dao) normalizeSingleVsMultipleFieldChanges(newCollection, oldCollecti
// note: for file fields the actual file objects are not
// deleted allowing additional custom handling via migration
copyQuery = txDao.DB().NewQuery(fmt.Sprintf(
- `UPDATE {{%s}} set [[%s]] = (
+ `UPDATE "%s" set "%s" = (
CASE
- WHEN COALESCE([[%s]], '[]') = '[]'
+ WHEN COALESCE("%s"::text, '[]') = '[]'
THEN ''
ELSE (
CASE
- WHEN json_valid([[%s]]) AND json_type([[%s]]) == 'array'
- THEN COALESCE(json_extract([[%s]], '$[#-1]'), '')
- ELSE [[%s]]
+ WHEN json_valid("%s"::text) AND json_typeof("%s"::json) = 'array'
+ THEN COALESCE("%s"->>-1,'')::text
+ ELSE "%s"::text
END
)
END
diff --git a/daos/record_table_sync_test.go b/daos/record_table_sync_test.go
index 68bec9c6f..2bc54f6a6 100644
--- a/daos/record_table_sync_test.go
+++ b/daos/record_table_sync_test.go
@@ -83,7 +83,7 @@ func TestSyncRecordTableSchema(t *testing.T) {
[]string{
"id", "created", "updated", "test",
"username", "email", "verified", "emailVisibility",
- "tokenKey", "passwordHash", "lastResetSentAt", "lastVerificationSentAt",
+ "tokenKey", "passwordHash", "lastResetSentAt", "lastVerificationSentAt", "lastLoginAlertSentAt",
},
4,
},
diff --git a/daos/record_test.go b/daos/record_test.go
index fcd3fbb45..cd5e9e816 100644
--- a/daos/record_test.go
+++ b/daos/record_test.go
@@ -1205,11 +1205,11 @@ func TestDeleteRecord(t *testing.T) {
}
// ensure that the json rel fields were prefixed
joinedQueries := strings.Join(calledQueries, " ")
- expectedRelManyPart := "`demo1` INNER JOIN json_each(CASE WHEN json_valid([[demo1.rel_many]]) THEN [[demo1.rel_many]] ELSE json_array([[demo1.rel_many]]) END)"
+ expectedRelManyPart := "SELECT `demo1`.* FROM `demo1` WHERE EXISTS (SELECT 1 FROM json_each(CASE WHEN json_valid([[demo1.rel_many]]) THEN [[demo1.rel_many]] ELSE json_array([[demo1.rel_many]]) END) {{__je__}} WHERE [[__je__.value]]='"
if !strings.Contains(joinedQueries, expectedRelManyPart) {
t.Fatalf("(rec3) Expected the cascade delete to call the query \n%v, got \n%v", expectedRelManyPart, calledQueries)
}
- expectedRelOnePart := "SELECT DISTINCT `demo1`.* FROM `demo1` WHERE (`demo1`.`rel_one`="
+ expectedRelOnePart := "SELECT `demo1`.* FROM `demo1` WHERE (`demo1`.`rel_one`='"
if !strings.Contains(joinedQueries, expectedRelOnePart) {
t.Fatalf("(rec3) Expected the cascade delete to call the query \n%v, got \n%v", expectedRelOnePart, calledQueries)
}
diff --git a/docker-compose.yaml b/docker-compose.yaml
index da7d6d63e..afa3206a0 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -2,7 +2,7 @@
version: '3'
services:
- # postgresql service
+ #postgresql service
postgresqlapp:
image: postgres:15.2-alpine
networks:
diff --git a/examples/base/main.go b/examples/base/main.go
index 4b3f9600b..6142696ac 100644
--- a/examples/base/main.go
+++ b/examples/base/main.go
@@ -3,10 +3,15 @@ package main
import (
"log"
"os"
+ "path/filepath"
+ "strings"
"time"
pocketbase "github.com/AlperRehaYAZGAN/postgresbase"
+ "github.com/AlperRehaYAZGAN/postgresbase/apis"
"github.com/AlperRehaYAZGAN/postgresbase/core"
+ "github.com/AlperRehaYAZGAN/postgresbase/plugins/ghupdate"
+ "github.com/AlperRehaYAZGAN/postgresbase/plugins/jsvm"
"github.com/AlperRehaYAZGAN/postgresbase/plugins/migratecmd"
)
@@ -16,14 +21,63 @@ func main() {
// ---------------------------------------------------------------
// Optional plugin flags:
// ---------------------------------------------------------------
+
+ var hooksDir string
+ app.RootCmd.PersistentFlags().StringVar(
+ &hooksDir,
+ "hooksDir",
+ "",
+ "the directory with the JS app hooks",
+ )
+
+ var hooksWatch bool
+ app.RootCmd.PersistentFlags().BoolVar(
+ &hooksWatch,
+ "hooksWatch",
+ true,
+ "auto restart the app on pb_hooks file change",
+ )
+
+ var hooksPool int
+ app.RootCmd.PersistentFlags().IntVar(
+ &hooksPool,
+ "hooksPool",
+ 25,
+ "the total prewarm goja.Runtime instances for the JS app hooks execution",
+ )
+
+ var migrationsDir string
+ app.RootCmd.PersistentFlags().StringVar(
+ &migrationsDir,
+ "migrationsDir",
+ "",
+ "the directory with the user defined migrations",
+ )
+
var automigrate bool
app.RootCmd.PersistentFlags().BoolVar(
&automigrate,
"automigrate",
- false,
+ true,
"enable/disable auto migrations",
)
+ var publicDir string
+ app.RootCmd.PersistentFlags().StringVar(
+ &publicDir,
+ "publicDir",
+ defaultPublicDir(),
+ "the directory to serve static files",
+ )
+
+ var indexFallback bool
+ app.RootCmd.PersistentFlags().BoolVar(
+ &indexFallback,
+ "indexFallback",
+ true,
+ "fallback the request to index.html on missing static path (eg. when pretty urls are used with SPA)",
+ )
+
var queryTimeout int
app.RootCmd.PersistentFlags().IntVar(
&queryTimeout,
@@ -38,28 +92,46 @@ func main() {
// Plugins and hooks:
// ---------------------------------------------------------------
+ // load jsvm (hooks and migrations)
+ jsvm.MustRegister(app, jsvm.Config{
+ MigrationsDir: migrationsDir,
+ HooksDir: hooksDir,
+ HooksWatch: hooksWatch,
+ HooksPoolSize: hooksPool,
+ })
+
// migrate command (with js templates)
migratecmd.MustRegister(app, app.RootCmd, migratecmd.Config{
TemplateLang: migratecmd.TemplateLangJS,
Automigrate: automigrate,
- // Dir: migrationsDir,
+ Dir: migrationsDir,
})
- // // GitHub selfupdate
- // ghupdate.MustRegister(app, app.RootCmd, ghupdate.Config{})
+ // GitHub selfupdate
+ ghupdate.MustRegister(app, app.RootCmd, ghupdate.Config{})
app.OnAfterBootstrap().PreAdd(func(e *core.BootstrapEvent) error {
app.Dao().ModelQueryTimeout = time.Duration(queryTimeout) * time.Second
return nil
})
- // app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
- // // serves static files from the provided public dir (if exists)
- // e.Router.GET("/*", apis.StaticDirectoryHandler(os.DirFS(publicDir), indexFallback))
- // return nil
- // })
+ app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
+ // serves static files from the provided public dir (if exists)
+ e.Router.GET("/*", apis.StaticDirectoryHandler(os.DirFS(publicDir), indexFallback))
+ return nil
+ })
if err := app.Start(); err != nil {
log.Fatal(err)
}
}
+
+// the default pb_public dir location is relative to the executable
+func defaultPublicDir() string {
+ if strings.HasPrefix(os.Args[0], os.TempDir()) {
+ // most likely ran with go run
+ return "./pb_public"
+ }
+
+ return filepath.Join(os.Args[0], "../pb_public")
+}
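
A small clarifying example for `defaultPublicDir`: because `filepath.Join` cleans the `..` against the binary's file name, the result is a `pb_public` directory sitting next to the executable (the binary path below is just an example value):

```go
// Minimal sketch of the path resolution used by defaultPublicDir above.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	exe := "/usr/local/bin/postgresbase" // stand-in for os.Args[0]
	fmt.Println(filepath.Join(exe, "../pb_public"))
	// Output: /usr/local/bin/pb_public
}
```
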
diff --git a/forms/admin_upsert.go b/forms/admin_upsert.go
index 65537c05b..fd40d390d 100644
--- a/forms/admin_upsert.go
+++ b/forms/admin_upsert.go
@@ -55,6 +55,7 @@ func (form *AdminUpsert) Validate() error {
&form.Id,
validation.When(
form.admin.IsNew(),
+ // !CHANGED: replace the default id validations with Snowflake id validation
validation.Length(models.SnowflakeMinLen, models.SnowflakeMaxLen),
validation.Match(idRegex),
validation.By(validators.UniqueId(form.dao, form.admin.TableName())),
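
The id rule above swaps PocketBase's default id checks for Snowflake-length ones. A hedged standalone sketch of an equivalent check with ozzo-validation (the 1/19 bounds and the digits-only pattern are assumptions here; the real limits come from `models.SnowflakeMinLen`/`models.SnowflakeMaxLen` and the package-level `idRegex`):

```go
package main

import (
	"fmt"
	"regexp"

	validation "github.com/go-ozzo/ozzo-validation/v4"
)

// assumed pattern and bounds - placeholders for idRegex and the models constants
var snowflakeIdRegex = regexp.MustCompile(`^[0-9]+$`)

func checkSnowflakeId(id string) error {
	return validation.Validate(id,
		validation.Length(1, 19),
		validation.Match(snowflakeIdRegex),
	)
}

func main() {
	fmt.Println(checkSnowflakeId("65372167884964274")) // <nil> for a Snowflake-like numeric id
	fmt.Println(checkSnowflakeId("not-a-snowflake"))   // fails the pattern check
}
```
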
diff --git a/forms/collection_upsert.go b/forms/collection_upsert.go
index 99c2cea9c..0908406d1 100644
--- a/forms/collection_upsert.go
+++ b/forms/collection_upsert.go
@@ -5,6 +5,7 @@ import (
"fmt"
"regexp"
"strconv"
+ "strings"
"github.com/AlperRehaYAZGAN/postgresbase/core"
"github.com/AlperRehaYAZGAN/postgresbase/daos"
@@ -105,6 +106,7 @@ func (form *CollectionUpsert) Validate() error {
&form.Id,
validation.When(
form.collection.IsNew(),
+ // !CHANGED: replace the default id validations with Snowflake id validation
validation.Length(models.SnowflakeMinLen, models.SnowflakeMaxLen),
validation.Match(idRegex),
validation.By(validators.UniqueId(form.dao, form.collection.TableName())),
@@ -131,6 +133,7 @@ func (form *CollectionUpsert) Validate() error {
validation.Match(collectionNameRegex),
validation.By(form.ensureNoSystemNameChange),
validation.By(form.checkUniqueName),
+ validation.By(form.checkForVia),
),
// validates using the type's own validation rules + some collection's specifics
validation.Field(
@@ -163,6 +166,19 @@ func (form *CollectionUpsert) Validate() error {
)
}
+func (form *CollectionUpsert) checkForVia(value any) error {
+ v, _ := value.(string)
+ if v == "" {
+ return nil
+ }
+
+ if strings.Contains(strings.ToLower(v), "_via_") {
+ return validation.NewError("validation_invalid_name", "The name of the collection cannot contain '_via_'.")
+ }
+
+ return nil
+}
+
func (form *CollectionUpsert) checkUniqueName(value any) error {
v, _ := value.(string)
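
The new `checkForVia` rule rejects names containing `_via_` (case-insensitive), presumably because PocketBase's filter syntax uses `_via_` for back-relation lookups (e.g. `comments_via_post`). A minimal sketch of the same guard wired into ozzo-validation outside the form:

```go
package main

import (
	"fmt"
	"strings"

	validation "github.com/go-ozzo/ozzo-validation/v4"
)

// mirrors the checkForVia rule added above, as a standalone RuleFunc
func checkForVia(value any) error {
	v, _ := value.(string)
	if v == "" {
		return nil
	}
	if strings.Contains(strings.ToLower(v), "_via_") {
		return validation.NewError("validation_invalid_name", "The name of the collection cannot contain '_via_'.")
	}
	return nil
}

func main() {
	fmt.Println(validation.Validate("posts", validation.By(checkForVia)))   // <nil>
	fmt.Println(validation.Validate("a_VIA_b", validation.By(checkForVia))) // rejected, check is case-insensitive
}
```
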
diff --git a/forms/collection_upsert_test.go b/forms/collection_upsert_test.go
index 55f866975..69cc1bf34 100644
--- a/forms/collection_upsert_test.go
+++ b/forms/collection_upsert_test.go
@@ -105,6 +105,17 @@ func TestCollectionUpsertValidateAndSubmit(t *testing.T) {
{"empty create (auth)", "", `{"type":"auth"}`, []string{"name"}},
{"empty create (view)", "", `{"type":"view"}`, []string{"name", "options"}},
{"empty update", "demo2", "{}", []string{}},
+ {
+ "collection and field with _via_ names",
+ "",
+ `{
+ "name": "a_via_b",
+ "schema": [
+ {"name":"c_via_d","type":"text"}
+ ]
+ }`,
+ []string{"name", "schema"},
+ },
{
"create failure",
"",
diff --git a/forms/record_oauth2_login.go b/forms/record_oauth2_login.go
index 9e120ebc6..48349af8b 100644
--- a/forms/record_oauth2_login.go
+++ b/forms/record_oauth2_login.go
@@ -222,15 +222,11 @@ func (form *RecordOAuth2Login) submit(data *RecordOAuth2LoginData) error {
// load custom data
createForm.LoadData(form.CreateData)
- // load the OAuth2 profile data as fallback
- if createForm.Email == "" {
- createForm.Email = data.OAuth2User.Email
- }
- createForm.Verified = false
- if createForm.Email == data.OAuth2User.Email {
- // mark as verified as long as it matches the OAuth2 data (even if the email is empty)
- createForm.Verified = true
- }
+ // load the OAuth2 user data
+ createForm.Email = data.OAuth2User.Email
+ createForm.Verified = true // mark as verified as long as it matches the OAuth2 data (even if the email is empty)
+
+ // generate a random password if not explicitly set
if createForm.Password == "" {
createForm.Password = security.RandomString(30)
createForm.PasswordConfirm = createForm.Password
@@ -247,6 +243,19 @@ func (form *RecordOAuth2Login) submit(data *RecordOAuth2LoginData) error {
return err
}
} else {
+ isLoggedAuthRecord := form.loggedAuthRecord != nil &&
+ form.loggedAuthRecord.Id == data.Record.Id &&
+ form.loggedAuthRecord.Collection().Id == data.Record.Collection().Id
+
+ // set random password for users with unverified email
+ // (this is in case a malicious actor has registered via password using the user email)
+ if !isLoggedAuthRecord && data.Record.Email() != "" && !data.Record.Verified() {
+ data.Record.SetPassword(security.RandomString(30))
+ if err := txDao.SaveRecord(data.Record); err != nil {
+ return err
+ }
+ }
+
// update the existing auth record empty email if the data.OAuth2User has one
// (this is in case previously the auth record was created
// with an OAuth2 provider that didn't return an email address)
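
The guard above only rotates the password of an existing record when the OAuth2 login is not coming from the already authenticated record, the record has an email, and that email is still unverified. A hedged sketch of that decision extracted as a pure function:

```go
package main

import "fmt"

// stand-in for the relevant bits of models.Record used in the check above
type authRecord struct {
	id, collectionId, email string
	verified                bool
}

// shouldRotatePassword mirrors the condition added above: lock out a possible
// prior password-based signup made with the same (unverified) email address.
func shouldRotatePassword(logged *authRecord, matched authRecord) bool {
	isLoggedAuthRecord := logged != nil &&
		logged.id == matched.id &&
		logged.collectionId == matched.collectionId

	return !isLoggedAuthRecord && matched.email != "" && !matched.verified
}

func main() {
	matched := authRecord{id: "r1", collectionId: "c1", email: "a@example.com", verified: false}
	fmt.Println(shouldRotatePassword(nil, matched))      // true - reset to a random password first
	fmt.Println(shouldRotatePassword(&matched, matched)) // false - it's the record that is already logged in
}
```
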
diff --git a/forms/record_password_reset_confirm.go b/forms/record_password_reset_confirm.go
index 0265ffa53..84d29be3b 100644
--- a/forms/record_password_reset_confirm.go
+++ b/forms/record_password_reset_confirm.go
@@ -5,7 +5,9 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/daos"
"github.com/AlperRehaYAZGAN/postgresbase/forms/validators"
"github.com/AlperRehaYAZGAN/postgresbase/models"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/security"
validation "github.com/go-ozzo/ozzo-validation/v4"
+ "github.com/spf13/cast"
)
// RecordPasswordResetConfirm is an auth record password reset confirmation form.
@@ -91,9 +93,21 @@ func (form *RecordPasswordResetConfirm) Submit(interceptors ...InterceptorFunc[*
return nil, err
}
+ if !authRecord.Verified() {
+ payload, err := security.ParseUnverifiedJWT(form.Token)
+ if err != nil {
+ return nil, err
+ }
+
+ // mark as verified if the email hasn't changed
+ if authRecord.Email() == cast.ToString(payload["email"]) {
+ authRecord.SetVerified(true)
+ }
+ }
+
interceptorsErr := runInterceptors(authRecord, func(m *models.Record) error {
authRecord = m
- return form.dao.SaveRecord(m)
+ return form.dao.SaveRecord(authRecord)
}, interceptors...)
if interceptorsErr != nil {
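
The verification step above marks the record as verified only when the `email` claim of the (already validated) reset token still matches the record's current email. A minimal sketch of that comparison, with a plain map standing in for the `security.ParseUnverifiedJWT` payload:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

// shouldMarkVerified sketches the rule added above; payload stands in for
// the jwt.MapClaims returned by security.ParseUnverifiedJWT(form.Token).
func shouldMarkVerified(recordEmail string, alreadyVerified bool, payload map[string]any) bool {
	if alreadyVerified {
		return false // nothing to change
	}
	return recordEmail == cast.ToString(payload["email"])
}

func main() {
	payload := map[string]any{"email": "a@example.com"} // assumed token claims
	fmt.Println(shouldMarkVerified("a@example.com", false, payload))   // true
	fmt.Println(shouldMarkVerified("new@example.com", false, payload)) // false - email changed after the token was issued
}
```
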
diff --git a/forms/record_password_reset_request.go b/forms/record_password_reset_request.go
index 3178176c2..e307c027b 100644
--- a/forms/record_password_reset_request.go
+++ b/forms/record_password_reset_request.go
@@ -46,7 +46,7 @@ func (form *RecordPasswordResetRequest) SetDao(dao *daos.Dao) {
// Validate makes the form validatable by implementing [validation.Validatable] interface.
//
-// This method doesn't checks whether auth record with `form.Email` exists (this is done on Submit).
+// This method doesn't check whether auth record with `form.Email` exists (this is done on Submit).
func (form *RecordPasswordResetRequest) Validate() error {
return validation.ValidateStruct(form,
validation.Field(
diff --git a/forms/record_upsert.go b/forms/record_upsert.go
index 08505cc4f..d8ff4f705 100644
--- a/forms/record_upsert.go
+++ b/forms/record_upsert.go
@@ -165,7 +165,7 @@ func (form *RecordUpsert) extractMultipartFormData(
data := map[string]any{}
filesToUpload := map[string][]*filesystem.File{}
- arrayValueSupportTypes := schema.ArraybleFieldTypes()
+ arraybleFieldTypes := schema.ArraybleFieldTypes()
for fullKey, values := range r.PostForm {
key := fullKey
@@ -178,8 +178,18 @@ func (form *RecordUpsert) extractMultipartFormData(
continue
}
+ // special case for multipart json encoded fields
+ if key == rest.MultipartJsonKey {
+ for _, v := range values {
+ if err := json.Unmarshal([]byte(v), &data); err != nil {
+ form.app.Logger().Debug("Failed to decode @json value into the data map", "error", err, "value", v)
+ }
+ }
+ continue
+ }
+
field := form.record.Collection().Schema.GetFieldByName(key)
- if field != nil && list.ExistInSlice(field.Type, arrayValueSupportTypes) {
+ if field != nil && list.ExistInSlice(field.Type, arraybleFieldTypes) {
data[key] = values
} else {
data[key] = values[0]
@@ -313,10 +323,10 @@ func (form *RecordUpsert) AddFiles(key string, files ...*filesystem.File) error
// Example
//
// // mark only only 2 files for removal
-// form.AddFiles("documents", "file1_aw4bdrvws6.txt", "file2_xwbs36bafv.txt")
+// form.RemoveFiles("documents", "file1_aw4bdrvws6.txt", "file2_xwbs36bafv.txt")
//
// // mark all "documents" files for removal
-// form.AddFiles("documents")
+// form.RemoveFiles("documents")
func (form *RecordUpsert) RemoveFiles(key string, toDelete ...string) error {
field := form.record.Collection().Schema.GetFieldByName(key)
if field == nil || field.Type != schema.FieldTypeFile {
@@ -466,6 +476,7 @@ func (form *RecordUpsert) Validate() error {
&form.Id,
validation.When(
form.record.IsNew(),
+ // !CHANGED: replace the default id validations with Snowflake id validation
validation.Length(models.SnowflakeMinLen, models.SnowflakeMaxLen),
validation.Match(idRegex),
validation.By(validators.UniqueId(form.dao, form.record.TableName())),
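
The multipart handling above merges any value posted under the special `rest.MultipartJsonKey` field into the record data map as JSON (the literal key value isn't shown in this diff). A small sketch of just that merge step:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// mergeMultipartJSON sketches the "@json" merge added to
// extractMultipartFormData: each value is unmarshalled on top of the
// existing data map, so regular form fields are preserved.
func mergeMultipartJSON(data map[string]any, values []string) {
	for _, v := range values {
		if err := json.Unmarshal([]byte(v), &data); err != nil {
			fmt.Println("failed to decode json value:", err) // the form logs this via app.Logger().Debug
		}
	}
}

func main() {
	data := map[string]any{"title": "from a regular field"}
	mergeMultipartJSON(data, []string{`{"json":["a","b"],"email":"test3@example.com"}`})
	fmt.Println(data) // map[email:test3@example.com json:[a b] title:from a regular field]
}
```
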
diff --git a/forms/record_upsert_test.go b/forms/record_upsert_test.go
index f987e49a3..c2174ed96 100644
--- a/forms/record_upsert_test.go
+++ b/forms/record_upsert_test.go
@@ -20,6 +20,7 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/tests"
"github.com/AlperRehaYAZGAN/postgresbase/tools/filesystem"
"github.com/AlperRehaYAZGAN/postgresbase/tools/list"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/rest"
"github.com/AlperRehaYAZGAN/postgresbase/tools/types"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/labstack/echo/v5"
@@ -150,9 +151,10 @@ func TestRecordUpsertLoadRequestMultipart(t *testing.T) {
}
formData, mp, err := tests.MockMultipartData(map[string]string{
- "a.b.id": "test_id",
- "a.b.text": "test123",
- "a.b.unknown": "test456",
+ "a.b.id": "test_id",
+ "a.b.text": "test123",
+ "a.b.unknown": "test456",
+ "a.b." + rest.MultipartJsonKey: `{"json":["a","b"],"email":"test3@example.com"}`,
// file fields unset/delete
"a.b.file_one-": "test_d61b33QdDU.txt", // delete with modifier
"a.b.file_many.0": "", // delete by index
@@ -184,6 +186,19 @@ func TestRecordUpsertLoadRequestMultipart(t *testing.T) {
t.Fatalf("Didn't expect unknown field to be set, got %v", v)
}
+ if v, ok := form.Data()["email"]; !ok || v != "test3@example.com" {
+ t.Fatalf("Expect email field to be %q, got %q", "test3@example.com", v)
+ }
+
+ rawJsonValue, ok := form.Data()["json"].(types.JsonRaw)
+ if !ok {
+ t.Fatal("Expect json field to be set")
+ }
+ expectedJsonValue := `["a","b"]`
+ if rawJsonValue.String() != expectedJsonValue {
+ t.Fatalf("Expect json field %v, got %v", expectedJsonValue, rawJsonValue)
+ }
+
fileOne, ok := form.Data()["file_one"]
if !ok {
t.Fatal("Expect file_one field to be set")
diff --git a/forms/validators/model.go b/forms/validators/model.go
index 6f070b03f..8eacf4088 100644
--- a/forms/validators/model.go
+++ b/forms/validators/model.go
@@ -9,7 +9,7 @@ import (
"github.com/pocketbase/dbx"
)
-// Compare checks whether the provided model id exists.
+// UniqueId checks whether the provided model id already exists.
//
// Example:
//
diff --git a/go.mod b/go.mod
index e53649263..5c8d7acb2 100644
--- a/go.mod
+++ b/go.mod
@@ -4,95 +4,146 @@ go 1.21
require (
github.com/AlecAivazis/survey/v2 v2.3.7
- github.com/aws/aws-sdk-go v1.49.15
+ github.com/aws/aws-sdk-go-v2 v1.30.3
+ github.com/aws/aws-sdk-go-v2/config v1.27.27
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.27
+ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2
+ github.com/aws/smithy-go v1.20.3
github.com/disintegration/imaging v1.6.2
github.com/domodwyer/mailyak/v3 v3.6.2
- github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d
- github.com/dop251/goja_nodejs v0.0.0-20231122114759-e84d9a924c5c
- github.com/fatih/color v1.16.0
+ github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2
+ github.com/dop251/goja_nodejs v0.0.0-20240418154818-2aae10d4cbcf
+ github.com/fatih/color v1.17.0
github.com/fsnotify/fsnotify v1.7.0
- github.com/gabriel-vasile/mimetype v1.4.3
- github.com/ganigeorgiev/fexpr v0.4.0
+ github.com/gabriel-vasile/mimetype v1.4.4
+ github.com/ganigeorgiev/fexpr v0.4.1
github.com/go-ozzo/ozzo-validation/v4 v4.3.0
- github.com/goccy/go-json v0.10.2
+ github.com/goccy/go-json v0.10.3
github.com/godruoyi/go-snowflake v0.0.2
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/labstack/echo/v5 v5.0.0-20230722203903-ec5b858dab61
github.com/lib/pq v1.10.9
- github.com/mattn/go-sqlite3 v1.14.19
+ github.com/mattn/go-sqlite3 v1.14.22
github.com/pocketbase/dbx v1.10.1
- github.com/pocketbase/tygoja v0.0.0-20231111102932-5420517293f4
+ github.com/pocketbase/tygoja v0.0.0-20240113091827-17918475d342
github.com/spf13/cast v1.6.0
- github.com/spf13/cobra v1.8.0
- gocloud.dev v0.36.0
- golang.org/x/crypto v0.17.0
- golang.org/x/net v0.19.0
- golang.org/x/oauth2 v0.15.0
- golang.org/x/sync v0.6.0
- modernc.org/sqlite v1.28.0
+ github.com/spf13/cobra v1.8.1
+ gocloud.dev v0.37.0
+ golang.org/x/crypto v0.25.0
+ golang.org/x/net v0.27.0
+ golang.org/x/oauth2 v0.21.0
+ golang.org/x/sync v0.7.0
+ modernc.org/sqlite v1.31.1
)
require (
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
- github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.26.3 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.16.14 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
- github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.10 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect
- github.com/aws/aws-sdk-go-v2/service/s3 v1.47.8 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
- github.com/aws/smithy-go v1.19.0 // indirect
- github.com/dlclark/regexp2 v1.10.0 // indirect
+ github.com/aws/aws-sdk-go v1.51.11 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect
+ github.com/dlclark/regexp2 v1.11.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
+ github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect
- github.com/google/uuid v1.5.0 // indirect
- github.com/google/wire v0.5.0 // indirect
- github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+ github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.13.0 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
+ github.com/joho/godotenv v1.5.1 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
+ github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
go.opencensus.io v0.24.0 // indirect
- golang.org/x/image v0.15.0 // indirect
- golang.org/x/mod v0.14.0 // indirect
- golang.org/x/sys v0.16.0 // indirect
- golang.org/x/term v0.16.0 // indirect
- golang.org/x/text v0.14.0 // indirect
+ golang.org/x/image v0.18.0 // indirect
+ golang.org/x/mod v0.17.0 // indirect
+ golang.org/x/sys v0.22.0 // indirect
+ golang.org/x/term v0.22.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.16.1 // indirect
- golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
- google.golang.org/api v0.155.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect
- google.golang.org/grpc v1.60.1 // indirect
- google.golang.org/protobuf v1.32.0 // indirect
- lukechampine.com/uint128 v1.3.0 // indirect
- modernc.org/cc/v3 v3.41.0 // indirect
- modernc.org/ccgo/v3 v3.16.15 // indirect
- modernc.org/libc v1.38.0 // indirect
+ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
+ golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
+ google.golang.org/api v0.189.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a // indirect
+ google.golang.org/grpc v1.65.0 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
+ modernc.org/gc/v3 v3.0.0-20240722195230-4a140ff9c08e // indirect
+ modernc.org/libc v1.55.3 // indirect
modernc.org/mathutil v1.6.0 // indirect
- modernc.org/memory v1.7.2 // indirect
- modernc.org/opt v0.1.3 // indirect
+ modernc.org/memory v1.8.0 // indirect
modernc.org/strutil v1.2.0 // indirect
modernc.org/token v1.1.0 // indirect
)
diff --git a/go.sum b/go.sum
index 02acf98da..07c9dec5a 100644
--- a/go.sum
+++ b/go.sum
@@ -1,14 +1,19 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
-cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
-cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
-cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
+cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE=
+cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
+cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
+cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
+cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY=
+cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -17,86 +22,50 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDe
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.48.16 h1:mcj2/9J/MJ55Dov+ocMevhR8Jv6jW/fAxbrn4a1JFc8=
-github.com/aws/aws-sdk-go v1.48.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go v1.49.15 h1:aH9bSV4kL4ziH0AMtuYbukGIVebXddXBL0cKZ1zj15k=
-github.com/aws/aws-sdk-go v1.49.15/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk=
-github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
-github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
-github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
-github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o=
-github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg=
-github.com/aws/aws-sdk-go-v2/config v1.26.3 h1:dKuc2jdp10y13dEEvPqWxqLoc0vF3Z9FC45MvuQSxOA=
-github.com/aws/aws-sdk-go-v2/config v1.26.3/go.mod h1:Bxgi+DeeswYofcYO0XyGClwlrq3DZEXli0kLf4hkGA0=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.14 h1:mMDTwwYO9A0/JbOCOG7EOZHtYM+o7OfGWfu0toa23VE=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.14/go.mod h1:cniAUh3ErQPHtCQGPT5ouvSAQ0od8caTO9OOuufZOAE=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 h1:FnLf60PtjXp8ZOzQfhJVsqF0OtYKQZWQfqOLshh8YXg=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7/go.mod h1:tDVvl8hyU6E9B8TrnNrZQEVkQlB8hjJwcgpPhgtlnNg=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.10 h1:SdMso4tShJKrwGmwZPMO6urFilhTYkEZUPsndW0unfM=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.10/go.mod h1:qi+Nerp7JHgl+eyVtiRPA7T4bV5onFRWgpnF2JzPW60=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 h1:/90OR2XbSYfXucBMJ4U14wrjlfleq/0SB6dZDPncgmo=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9/go.mod h1:dN/Of9/fNZet7UrQQ6kTDo/VSwKPIq94vjlU16bRARc=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.47.8 h1:vPmag9qVmGho0jvtK5+nLwixJeX6Smd0IZE1OJIQ7wE=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.47.8/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 h1:dGrs+Q/WzhsiUKh82SfTVN66QzyulXuMDTV/G8ZxOac=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.6/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.6 h1:Yf2MIo9x+0tyv76GljxzqA3WtC5mw7NmazD2chwjxE4=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.6/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
-github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
-github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
+github.com/aws/aws-sdk-go v1.51.11 h1:El5VypsMIz7sFwAAj/j06JX9UGs4KAbAIEaZ57bNY4s=
+github.com/aws/aws-sdk-go v1.51.11/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
+github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM=
+github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90=
+github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 h1:u1KOU1S15ufyZqmH/rA3POkiRH6EcDANHj2xHRzq+zc=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8/go.mod h1:WPv2FRnkIOoDv/8j2gSUsI4qDc7392w5anFB/I89GZ8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE=
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
+github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
+github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
-github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic=
-github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -104,44 +73,45 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
-github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
-github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0=
-github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
-github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
-github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d h1:wi6jN5LVt/ljaBG4ue79Ekzb12QfJ52L9Q98tl8SWhw=
-github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
-github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
-github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
-github.com/dop251/goja_nodejs v0.0.0-20231122114759-e84d9a924c5c h1:hLoodLRD4KLWIH8eyAQCLcH8EqIrjac7fCkp/fHnvuQ=
-github.com/dop251/goja_nodejs v0.0.0-20231122114759-e84d9a924c5c/go.mod h1:bhGPmCgCCTSRfiMYWjpS46IDo9EUZXlsuUaPXSWGbv0=
+github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2 h1:4Ew88p5s9dwIk5/woUyqI9BD89NgZoUNH4/rM/h2UDg=
+github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2/go.mod h1:o31y53rb/qiIAONF7w3FHJZRqqP3fzHUr1HqanthByw=
+github.com/dop251/goja_nodejs v0.0.0-20240418154818-2aae10d4cbcf h1:2JoVYP9iko8uuIW33BQafzaylDixXbdXCRw/vCoxL+s=
+github.com/dop251/goja_nodejs v0.0.0-20240418154818-2aae10d4cbcf/go.mod h1:bhGPmCgCCTSRfiMYWjpS46IDo9EUZXlsuUaPXSWGbv0=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
+github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
-github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
-github.com/ganigeorgiev/fexpr v0.4.0 h1:ojitI+VMNZX/odeNL1x3RzTTE8qAIVvnSSYPNAnQFDI=
-github.com/ganigeorgiev/fexpr v0.4.0/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
+github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I=
+github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s=
+github.com/ganigeorgiev/fexpr v0.4.1 h1:hpUgbUEEWIZhSDBtf4M9aUNfQQ0BZkGRaMePy7Gcx5k=
+github.com/ganigeorgiev/fexpr v0.4.1/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es=
github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew=
-github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
-github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
+github.com/go-sourcemap/sourcemap v2.1.4+incompatible h1:a+iTbH5auLKxaNwQFg0B+TCYl6lbukKPc7b5x0n1s6Q=
+github.com/go-sourcemap/sourcemap v2.1.4+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
-github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/go-sql-driver/mysql v1.8.0 h1:UtktXaU2Nb64z/pLiGIxY4431SJ4/dR5cjMmlVHgnT4=
+github.com/go-sql-driver/mysql v1.8.0/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/godruoyi/go-snowflake v0.0.2 h1:rN9imTkrUJ5ZjuwTOi7kTGQFEZSUI3pwPMzAb7uitk4=
github.com/godruoyi/go-snowflake v0.0.2/go.mod h1:6JXMZzmleLpSK9pYpg4LXTcAz54mdYXTeXUvVks17+4=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
@@ -161,60 +131,45 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE=
-github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=
-github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
-github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
-github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ=
-github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 h1:e+8XbKB6IMn8A4OAyZccO4pYfB3s7bt6azNIPE7AnPg=
+github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
-github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8=
-github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
+github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
+github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
-github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
+github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/echo/v5 v5.0.0-20230722203903-ec5b858dab61 h1:FwuzbVh87iLiUQj1+uQUsuw9x5t9m5n5g7rG7o4svW4=
@@ -228,30 +183,29 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
-github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=
-github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
+github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pocketbase/dbx v1.10.1 h1:cw+vsyfCJD8YObOVeqb93YErnlxwYMkNZ4rwN0G0AaA=
github.com/pocketbase/dbx v1.10.1/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
-github.com/pocketbase/tygoja v0.0.0-20231111102932-5420517293f4 h1:85kAYIKrKEeau7WgXg8B7Km8etrVavJAyH7XcR5MkFw=
-github.com/pocketbase/tygoja v0.0.0-20231111102932-5420517293f4/go.mod h1:dOJ+pCyqm/jRn5kO/TX598J0e5xGDcJAZerK5atCrKI=
+github.com/pocketbase/tygoja v0.0.0-20240113091827-17918475d342 h1:OcAwewen3hs/zY8i0syt8CcMTGBJhQwQRVDLcoQVXVk=
+github.com/pocketbase/tygoja v0.0.0-20240113091827-17918475d342/go.mod h1:dOJ+pCyqm/jRn5kO/TX598J0e5xGDcJAZerK5atCrKI=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -271,29 +225,33 @@ github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-gocloud.dev v0.35.0 h1:x/Gtt5OJdT4j+ir1AXAIXb7bBnFawXAAaJptCUGk3HU=
-gocloud.dev v0.35.0/go.mod h1:wbyF+BhfdtLWyUtVEWRW13hFLb1vXnV2ovEhYGQe3ck=
-gocloud.dev v0.36.0 h1:q5zoXux4xkOZP473e1EZbG8Gq9f0vlg1VNH5Du/ybus=
-gocloud.dev v0.36.0/go.mod h1:bLxah6JQVKBaIxzsr5BQLYB4IYdWHkMZdzCXlo6F0gg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro=
+gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
-golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
-golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8=
-golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
+golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
+golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -304,19 +262,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
-golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -324,78 +280,59 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
-golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
-golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
-golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
-golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
-google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
-google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
-google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
-google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
+golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk=
+golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI=
+google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg=
-google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI=
-google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 h1:EWIeHfGuUf00zrVZGEgYFxok7plSAXBGcH7NNdMAWvA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU=
+google.golang.org/genproto v0.0.0-20240722135656-d784300faade h1:lKFsS7wpngDgSCeFn7MoLy+wBDQZ1UQIJD4UNM1Qvkg=
+google.golang.org/genproto v0.0.0-20240722135656-d784300faade/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a h1:hqK4+jJZXCU4pW7jsAdGOVFIfLHQeV7LaizZKnZ84HI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
-google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
-google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
-google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -405,18 +342,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -424,35 +353,29 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
-lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
-modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
-modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
-modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
-modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
-modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
-modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
-modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
-modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
-modernc.org/libc v1.37.0 h1:WerjebcsP6A7Jy+f2lCnHAkiSTLf7IaSftBYUtoswak=
-modernc.org/libc v1.37.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
-modernc.org/libc v1.38.0 h1:o4Lpk0zNDSdsjfEXnF1FGXWQ9PDi1NOdWcLP5n13FGo=
-modernc.org/libc v1.38.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
+modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
+modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
+modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
+modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
+modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
+modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
+modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
+modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
+modernc.org/gc/v3 v3.0.0-20240722195230-4a140ff9c08e h1:WPC4v0rNIFb2PY+nBBEEKyugPPRHPzUgyN3xZPpGK58=
+modernc.org/gc/v3 v3.0.0-20240722195230-4a140ff9c08e/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
+modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
+modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
-modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
-modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
+modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
+modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
-modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8=
-modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
-modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
-modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
+modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
+modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
+modernc.org/sqlite v1.31.1 h1:XVU0VyzxrYHlBhIs1DiEgSl0ZtdnPtbLVy8hSkzxGrs=
+modernc.org/sqlite v1.31.1/go.mod h1:UqoylwmTb9F+IqXERT8bW9zzOWN8qwAIcLdzeBZs4hA=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
-modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY=
-modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
-modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY=
-modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE=
diff --git a/mails/record.go b/mails/record.go
index 82b7ba6dd..5097a1145 100644
--- a/mails/record.go
+++ b/mails/record.go
@@ -12,6 +12,43 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/tools/mailer"
)
+// @todo remove after the refactoring
+//
+// SendRecordPasswordLoginAlert sends a password login alert email to the specified auth record with linked OAuth2 providers.
+func SendRecordPasswordLoginAlert(app core.App, authRecord *models.Record, providerNames ...string) error {
+ params := struct {
+ AppName string
+ AppUrl string
+ Record *models.Record
+ ProviderNames []string
+ }{
+ AppName: app.Settings().Meta.AppName,
+ AppUrl: app.Settings().Meta.AppUrl,
+ Record: authRecord,
+ ProviderNames: providerNames,
+ }
+
+ mailClient := app.NewMailClient()
+
+ // resolve body template
+ body, renderErr := resolveTemplateContent(params, templates.Layout, templates.PasswordLoginAlertBody)
+ if renderErr != nil {
+ return renderErr
+ }
+
+ message := &mailer.Message{
+ From: mail.Address{
+ Name: app.Settings().Meta.SenderName,
+ Address: app.Settings().Meta.SenderAddress,
+ },
+ To: []mail.Address{{Address: authRecord.Email()}},
+ Subject: "Password login alert",
+ HTML: body,
+ }
+
+ return mailClient.Send(message)
+}
+
// SendRecordPasswordReset sends a password reset request email to the specified user.
func SendRecordPasswordReset(app core.App, authRecord *models.Record) error {
token, tokenErr := tokens.NewRecordResetPasswordToken(app, authRecord)
@@ -92,7 +129,7 @@ func SendRecordVerification(app core.App, authRecord *models.Record) error {
})
}
-// SendUserChangeEmail sends a change email confirmation email to the specified user.
+// SendRecordChangeEmail sends a change email confirmation email to the specified user.
func SendRecordChangeEmail(app core.App, record *models.Record, newEmail string) error {
token, tokenErr := tokens.NewRecordChangeEmailToken(app, record, newEmail)
if tokenErr != nil {
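
For context, a minimal sketch of how the new mail helper could be called from application code; the wrapper function and log message below are illustrative, only the `SendRecordPasswordLoginAlert` signature and the module path come from this diff:

```go
package example

import (
	"log"

	"github.com/AlperRehaYAZGAN/postgresbase/core"
	"github.com/AlperRehaYAZGAN/postgresbase/mails"
	"github.com/AlperRehaYAZGAN/postgresbase/models"
)

// notifyPasswordLogin sends the password login alert email and logs any failure.
// providerNames would normally be the OAuth2 providers linked to the record.
func notifyPasswordLogin(app core.App, authRecord *models.Record, providerNames []string) {
	if err := mails.SendRecordPasswordLoginAlert(app, authRecord, providerNames...); err != nil {
		log.Println("failed to send password login alert:", err)
	}
}
```
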
diff --git a/mails/record_test.go b/mails/record_test.go
index 3d53eb5e0..86e257bd9 100644
--- a/mails/record_test.go
+++ b/mails/record_test.go
@@ -8,6 +8,35 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/tests"
)
+func TestSendRecordPasswordLoginAlert(t *testing.T) {
+ t.Parallel()
+
+ testApp, _ := tests.NewTestApp()
+ defer testApp.Cleanup()
+
+ // ensure that action url normalization will be applied
+ testApp.Settings().Meta.AppUrl = "http://localhost:8090////"
+
+ user, _ := testApp.Dao().FindFirstRecordByData("users", "email", "test@example.com")
+
+ err := mails.SendRecordPasswordLoginAlert(testApp, user, "test1", "test2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if testApp.TestMailer.TotalSend != 1 {
+ t.Fatalf("Expected one email to be sent, got %d", testApp.TestMailer.TotalSend)
+ }
+
+ expectedParts := []string{"using a password", "OAuth2", "test1", "test2", "auth linked"}
+
+ for _, part := range expectedParts {
+ if !strings.Contains(testApp.TestMailer.LastMessage.HTML, part) {
+ t.Fatalf("Couldn't find %s\n in\n %s", part, testApp.TestMailer.LastMessage.HTML)
+ }
+ }
+}
+
func TestSendRecordPasswordReset(t *testing.T) {
t.Parallel()
diff --git a/mails/templates/layout.go b/mails/templates/layout.go
index 7a43ad920..21e0b9393 100644
--- a/mails/templates/layout.go
+++ b/mails/templates/layout.go
@@ -53,7 +53,7 @@ const Layout = `
.btn {
display: inline-block;
vertical-align: top;
- border: 0;
+ border: 1px solid #e5e5e5;
cursor: pointer;
color: #fff !important;
background: #16161a !important;
diff --git a/mails/templates/password_login_alert.go b/mails/templates/password_login_alert.go
new file mode 100644
index 000000000..8ffd1299f
--- /dev/null
+++ b/mails/templates/password_login_alert.go
@@ -0,0 +1,30 @@
+package templates
+
+// Available variables:
+//
+// ```
+// Record *models.Record
+// AppName string
+// AppUrl string
+// ProviderNames []string
+// ```
+const PasswordLoginAlertBody = `
+{{define "content"}}
+
+ Hello,
+
+ Just to let you know that someone has logged in to your {{.AppName}} account using a password while you already have
+ OAuth2
+ {{range $index, $provider := .ProviderNames }}
+ {{if $index}}|{{end}}
+ {{ $provider }}
+ {{ end }}
+ auth linked.
+
+ If you have recently signed in with a password, you may disregard this email.
+ If you don't recognize the above action, you should immediately change your {{.AppName}} account password.
+
+ Thanks,
+ {{.AppName}} team
+
+{{end}}
+`
diff --git a/migrations/1640988000_init.go b/migrations/1640988000_init.go
index 4e29c1961..7ff7addc5 100644
--- a/migrations/1640988000_init.go
+++ b/migrations/1640988000_init.go
@@ -34,6 +34,7 @@ func Register(
}
func init() {
+ // !CHANGED: We define JSON functions for Postgres in the migration files to support PocketBase's JSON-equivalent operations.
AppMigrations.Register(func(db dbx.Builder) error {
_, tablesErr := db.NewQuery(`
create or replace function json_valid(p_json text) returns boolean as $$
diff --git a/migrations/1677152688_rename_authentik_to_oidc.go b/migrations/1677152688_rename_authentik_to_oidc.go
index 7d53d9c86..801322f8a 100644
--- a/migrations/1677152688_rename_authentik_to_oidc.go
+++ b/migrations/1677152688_rename_authentik_to_oidc.go
@@ -6,6 +6,7 @@ import (
// This migration replaces the "authentikAuth" setting with "oidc".
func init() {
+ // !CHANGED: We define JSON functions for Postgres in the migration files to support PocketBase's JSON-equivalent operations.
AppMigrations.Register(func(db dbx.Builder) error {
_, err := db.NewQuery(`
UPDATE {{_params}}
diff --git a/migrations/1679943780_normalize_single_multiple_values.go b/migrations/1679943780_normalize_single_multiple_values.go
index 9138157cc..0074f227a 100644
--- a/migrations/1679943780_normalize_single_multiple_values.go
+++ b/migrations/1679943780_normalize_single_multiple_values.go
@@ -39,7 +39,7 @@ func normalizeMultivaluerFields(db dbx.Builder) error {
}
var updateQuery *dbx.Query
-
+ // !CHANGED: We define JSON functions for Postgres in the migration files to support PocketBase's JSON-equivalent operations.
if opt.IsMultiple() {
updateQuery = dao.DB().NewQuery(fmt.Sprintf(
`UPDATE {{%s}} set [[%s]] = (
@@ -65,7 +65,7 @@ func normalizeMultivaluerFields(db dbx.Builder) error {
))
} else {
updateQuery = dao.DB().NewQuery(fmt.Sprintf(
- // set-returning functions are not allowed in UPDATE at character
+ // !CHANGED: set-returning functions are not allowed in UPDATE in Postgres, hence the CASE expression below
`UPDATE {{%s}} SET [[%s]] = (
CASE
WHEN COALESCE([[%s]], '[]') = '[]'
diff --git a/migrations/1718706525_add_login_alert_column.go b/migrations/1718706525_add_login_alert_column.go
new file mode 100644
index 000000000..327b6fdc6
--- /dev/null
+++ b/migrations/1718706525_add_login_alert_column.go
@@ -0,0 +1,56 @@
+package migrations
+
+import (
+ "slices"
+
+ "github.com/AlperRehaYAZGAN/postgresbase/daos"
+ "github.com/AlperRehaYAZGAN/postgresbase/models"
+ "github.com/AlperRehaYAZGAN/postgresbase/models/schema"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/security"
+ "github.com/pocketbase/dbx"
+)
+
+// adds a "lastLoginAlertSentAt" column to all auth collection tables (if not already)
+func init() {
+ AppMigrations.Register(func(db dbx.Builder) error {
+ dao := daos.New(db)
+
+ collections := []*models.Collection{}
+ err := dao.CollectionQuery().AndWhere(dbx.HashExp{"type": models.CollectionTypeAuth}).All(&collections)
+ if err != nil {
+ return err
+ }
+
+ var needToResetTokens bool
+
+ for _, c := range collections {
+ columns, err := dao.TableColumns(c.Name)
+ if err != nil {
+ return err
+ }
+ if slices.Contains(columns, schema.FieldNameLastLoginAlertSentAt) {
+ continue // already inserted
+ }
+
+ _, err = db.AddColumn(c.Name, schema.FieldNameLastLoginAlertSentAt, "TEXT DEFAULT '' NOT NULL").Execute()
+ if err != nil {
+ return err
+ }
+
+ opts := c.AuthOptions()
+ if opts.AllowOAuth2Auth && (opts.AllowEmailAuth || opts.AllowUsernameAuth) {
+ needToResetTokens = true
+ }
+ }
+
+ settings, _ := dao.FindSettings()
+ if needToResetTokens && settings != nil {
+ settings.RecordAuthToken.Secret = security.RandomString(50)
+ if err := dao.SaveSettings(settings); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }, nil)
+}
diff --git a/migrations/logs/1640988000_init.go b/migrations/logs/1640988000_init.go
index 3fdf0323c..0bb973f3f 100644
--- a/migrations/logs/1640988000_init.go
+++ b/migrations/logs/1640988000_init.go
@@ -8,6 +8,7 @@ import (
var LogsMigrations migrate.MigrationsList
func init() {
+ // !CHANGED: We define JSON functions for Postgres in the migration files to support PocketBase's JSON-equivalent operations.
LogsMigrations.Register(func(db dbx.Builder) error {
_, err := db.NewQuery(`
CREATE OR REPLACE FUNCTION json_extract(json_data json, key text)
diff --git a/models/base.go b/models/base.go
index 97a1793a7..e561fcec7 100644
--- a/models/base.go
+++ b/models/base.go
@@ -9,6 +9,7 @@ import (
const (
// DefaultIdLength is the default length of the generated model id.
DefaultIdLength = 15
+ // !CHANGED: add Snowflake min/max length constants
SnowflakeMinLen = 8
SnowflakeMaxLen = 32
@@ -102,6 +103,7 @@ func (m *BaseModel) GetUpdated() types.DateTime {
//
// The generated id is a cryptographically random 15 characters length string.
func (m *BaseModel) RefreshId() {
+ // !CHANGED: use the Snowflake id generator
m.Id = security.RandomSnowflakeId()
}
diff --git a/models/record.go b/models/record.go
index 425bb543a..50dae23c7 100644
--- a/models/record.go
+++ b/models/record.go
@@ -310,7 +310,7 @@ func (m *Record) Set(key string, value any) {
switch key {
case schema.FieldNameEmailVisibility, schema.FieldNameVerified:
v = cast.ToBool(value)
- case schema.FieldNameLastResetSentAt, schema.FieldNameLastVerificationSentAt:
+ case schema.FieldNameLastResetSentAt, schema.FieldNameLastVerificationSentAt, schema.FieldNameLastLoginAlertSentAt:
v, _ = types.ParseDateTime(value)
case schema.FieldNameUsername, schema.FieldNameEmail, schema.FieldNameTokenKey, schema.FieldNamePasswordHash:
v = cast.ToString(value)
@@ -348,7 +348,7 @@ func (m *Record) Get(key string) any {
switch key {
case schema.FieldNameEmailVisibility, schema.FieldNameVerified:
v = cast.ToBool(v)
- case schema.FieldNameLastResetSentAt, schema.FieldNameLastVerificationSentAt:
+ case schema.FieldNameLastResetSentAt, schema.FieldNameLastVerificationSentAt, schema.FieldNameLastLoginAlertSentAt:
v, _ = types.ParseDateTime(v)
case schema.FieldNameUsername, schema.FieldNameEmail, schema.FieldNameTokenKey, schema.FieldNamePasswordHash:
v = cast.ToString(v)
@@ -686,7 +686,7 @@ func (m *Record) getNormalizeDataValueForDB(key string) any {
switch key {
case schema.FieldNameEmailVisibility, schema.FieldNameVerified:
return m.GetBool(key)
- case schema.FieldNameLastResetSentAt, schema.FieldNameLastVerificationSentAt:
+ case schema.FieldNameLastResetSentAt, schema.FieldNameLastVerificationSentAt, schema.FieldNameLastLoginAlertSentAt:
return m.GetDateTime(key)
case schema.FieldNameUsername, schema.FieldNameEmail, schema.FieldNameTokenKey, schema.FieldNamePasswordHash:
return m.GetString(key)
@@ -899,6 +899,24 @@ func (m *Record) SetLastVerificationSentAt(dateTime types.DateTime) error {
return nil
}
+// LastLoginAlertSentAt returns the "lastLoginAlertSentAt" auth record data value.
+func (m *Record) LastLoginAlertSentAt() types.DateTime {
+ return m.GetDateTime(schema.FieldNameLastLoginAlertSentAt)
+}
+
+// SetLastLoginAlertSentAt sets a "lastLoginAlertSentAt" auth record data value.
+//
+// Returns an error if the record is not from an auth collection.
+func (m *Record) SetLastLoginAlertSentAt(dateTime types.DateTime) error {
+ if !m.collection.IsAuth() {
+ return notAuthRecordErr
+ }
+
+ m.Set(schema.FieldNameLastLoginAlertSentAt, dateTime)
+
+ return nil
+}
+
// PasswordHash returns the "passwordHash" auth record data value.
func (m *Record) PasswordHash() string {
return m.GetString(schema.FieldNamePasswordHash)
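
A minimal sketch of how the new getter/setter pair might be used to throttle the alert; the 24-hour cooldown and helper names are assumptions for illustration and are not taken from this diff:

```go
package example

import (
	"time"

	"github.com/AlperRehaYAZGAN/postgresbase/models"
	"github.com/AlperRehaYAZGAN/postgresbase/tools/types"
)

// shouldSendLoginAlert reports whether a new password login alert email is due.
// The 24h cooldown is an illustrative assumption, not part of this change set.
func shouldSendLoginAlert(record *models.Record, now time.Time) bool {
	last := record.LastLoginAlertSentAt()
	return last.IsZero() || now.Sub(last.Time()) > 24*time.Hour
}

// markLoginAlertSent stores the current time in the "lastLoginAlertSentAt" field.
func markLoginAlertSent(record *models.Record) error {
	return record.SetLastLoginAlertSentAt(types.NowDateTime())
}
```
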
diff --git a/models/record_test.go b/models/record_test.go
index 21a054ad9..3dc1887f2 100644
--- a/models/record_test.go
+++ b/models/record_test.go
@@ -1328,7 +1328,7 @@ func TestRecordColumnValueMap(t *testing.T) {
},
{
models.CollectionTypeAuth,
- `{"created":"2022-01-01 10:00:30.123Z","email":"test_email","emailVisibility":true,"field1":"test","field2":"test.png","field3":["test1","test2"],"field4":["test11","test12"],"id":"test_id","lastResetSentAt":"2022-01-02 10:00:30.123Z","lastVerificationSentAt":"","passwordHash":"test_passwordHash","tokenKey":"test_tokenKey","updated":"","username":"test_username","verified":false}`,
+ `{"created":"2022-01-01 10:00:30.123Z","email":"test_email","emailVisibility":true,"field1":"test","field2":"test.png","field3":["test1","test2"],"field4":["test11","test12"],"id":"test_id","lastLoginAlertSentAt":"","lastResetSentAt":"2022-01-02 10:00:30.123Z","lastVerificationSentAt":"","passwordHash":"test_passwordHash","tokenKey":"test_tokenKey","updated":"","username":"test_username","verified":false}`,
},
}
@@ -1975,6 +1975,52 @@ func TestRecordRefreshTokenKey(t *testing.T) {
}
}
+func TestRecordLastPasswordLoginAlertSentAt(t *testing.T) {
+ t.Parallel()
+
+ scenarios := []struct {
+ collectionType string
+ expectError bool
+ }{
+ {models.CollectionTypeBase, true},
+ {models.CollectionTypeAuth, false},
+ }
+
+ testValue, err := types.ParseDateTime("2022-01-01 00:00:00.123Z")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i, s := range scenarios {
+ collection := &models.Collection{Type: s.collectionType}
+ m := models.NewRecord(collection)
+
+ if s.expectError {
+ if err := m.SetLastLoginAlertSentAt(testValue); err == nil {
+ t.Errorf("(%d) Expected error, got nil", i)
+ }
+ if v := m.LastLoginAlertSentAt(); !v.IsZero() {
+ t.Fatalf("(%d) Expected empty value, got %v", i, v)
+ }
+ // verify that nothing is stored in the record data slice
+ if v := m.Get(schema.FieldNameLastLoginAlertSentAt); v != nil {
+ t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameLastLoginAlertSentAt, v)
+ }
+ } else {
+ if err := m.SetLastLoginAlertSentAt(testValue); err != nil {
+ t.Fatalf("(%d) Expected nil, got error %v", i, err)
+ }
+ if v := m.LastLoginAlertSentAt(); v != testValue {
+ t.Fatalf("(%d) Expected %v, got %v", i, testValue, v)
+ }
+ // verify that the field is stored in the record data slice
+ if v := m.Get(schema.FieldNameLastLoginAlertSentAt); v != testValue {
+ t.Fatalf("(%d) Expected data field value %v, got %v", i, testValue, v)
+ }
+ }
+ }
+}
+
func TestRecordLastResetSentAt(t *testing.T) {
t.Parallel()
diff --git a/models/request_info.go b/models/request_info.go
index 260e11676..75d3ea3de 100644
--- a/models/request_info.go
+++ b/models/request_info.go
@@ -6,9 +6,17 @@ import (
"github.com/AlperRehaYAZGAN/postgresbase/models/schema"
)
+const (
+ RequestInfoContextDefault = "default"
+ RequestInfoContextRealtime = "realtime"
+ RequestInfoContextProtectedFile = "protectedFile"
+ RequestInfoContextOAuth2 = "oauth2"
+)
+
// RequestInfo defines a HTTP request data struct, usually used
// as part of the `@request.*` filter resolver.
type RequestInfo struct {
+ Context string `json:"context"`
Query map[string]any `json:"query"`
Data map[string]any `json:"data"`
Headers map[string]any `json:"headers"`
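
A small sketch of how the new request context field could be checked from Go code; the helper name is illustrative, only the `Context` field and the context constants come from this diff:

```go
package example

import "github.com/AlperRehaYAZGAN/postgresbase/models"

// isOAuth2Request reports whether the resolved request info originates
// from an OAuth2 auth flow, based on the new Context field.
func isOAuth2Request(info *models.RequestInfo) bool {
	return info != nil && info.Context == models.RequestInfoContextOAuth2
}
```
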
diff --git a/models/schema/schema_field.go b/models/schema/schema_field.go
index f8617062b..7c2adefa0 100644
--- a/models/schema/schema_field.go
+++ b/models/schema/schema_field.go
@@ -5,6 +5,7 @@ import (
"errors"
"regexp"
"strconv"
+ "strings"
"github.com/AlperRehaYAZGAN/postgresbase/tools/filesystem"
"github.com/AlperRehaYAZGAN/postgresbase/tools/list"
@@ -46,6 +47,7 @@ const (
FieldNamePasswordHash string = "passwordHash"
FieldNameLastResetSentAt string = "lastResetSentAt"
FieldNameLastVerificationSentAt string = "lastVerificationSentAt"
+ FieldNameLastLoginAlertSentAt string = "lastLoginAlertSentAt"
)
// BaseModelFieldNames returns the field names that all models have (id, created, updated).
@@ -77,6 +79,7 @@ func AuthFieldNames() []string {
FieldNamePasswordHash,
FieldNameLastResetSentAt,
FieldNameLastVerificationSentAt,
+ FieldNameLastLoginAlertSentAt,
}
}
@@ -198,6 +201,7 @@ func (f SchemaField) Validate() error {
excludeNames := BaseModelFieldNames()
// exclude special filter literals
+ // !CHANGED: changed the reserved "_rowid_" literal to "_ctid_" to avoid a Postgres conflict
excludeNames = append(excludeNames, "null", "true", "false", "_ctid_")
// exclude system literals
excludeNames = append(excludeNames, SystemFieldNames()...)
@@ -211,6 +215,7 @@ func (f SchemaField) Validate() error {
validation.Length(1, 255),
validation.Match(schemaFieldNameRegex),
validation.NotIn(list.ToInterfaceSlice(excludeNames)...),
+ validation.By(f.checkForVia),
),
validation.Field(&f.Type, validation.Required, validation.In(list.ToInterfaceSlice(FieldTypes())...)),
// currently file fields cannot be unique because a proper
@@ -228,6 +233,20 @@ func (f *SchemaField) checkOptions(value any) error {
return v.Validate()
}
+// @todo merge with the collections during the refactoring
+func (f *SchemaField) checkForVia(value any) error {
+ v, _ := value.(string)
+ if v == "" {
+ return nil
+ }
+
+ if strings.Contains(strings.ToLower(v), "_via_") {
+ return validation.NewError("validation_invalid_name", "The name of the field cannot contain '_via_'.")
+ }
+
+ return nil
+}
+
// InitOptions initializes the current field options based on its type.
//
// Returns error on unknown field type.
diff --git a/models/schema/schema_field_test.go b/models/schema/schema_field_test.go
index 9f710e539..ac0b2a4de 100644
--- a/models/schema/schema_field_test.go
+++ b/models/schema/schema_field_test.go
@@ -31,7 +31,7 @@ func TestSystemFieldNames(t *testing.T) {
func TestAuthFieldNames(t *testing.T) {
result := schema.AuthFieldNames()
- expected := 8
+ expected := 9
if len(result) != expected {
t.Fatalf("Expected %d auth field names, got %d (%v)", expected, len(result), result)
@@ -298,6 +298,15 @@ func TestSchemaFieldValidate(t *testing.T) {
},
[]string{"name"},
},
+ {
+ "name with _via_",
+ schema.SchemaField{
+ Type: schema.FieldTypeText,
+ Id: "1234567890",
+ Name: "a_via_b",
+ },
+ []string{"name"},
+ },
{
"reserved name (null)",
schema.SchemaField{
@@ -326,7 +335,7 @@ func TestSchemaFieldValidate(t *testing.T) {
[]string{"name"},
},
{
- "reserved name (_ctid_)",
+ "reserved name (_ctid_)", // !CHANGED: Change rowid to ctid to avoid postgress conflict
schema.SchemaField{
Type: schema.FieldTypeText,
Id: "1234567890",
diff --git a/models/settings/settings.go b/models/settings/settings.go
index c094e4b3a..3ca36c266 100644
--- a/models/settings/settings.go
+++ b/models/settings/settings.go
@@ -42,29 +42,31 @@ type Settings struct {
// Deprecated: Will be removed in v0.9+
EmailAuth EmailAuthConfig `form:"emailAuth" json:"emailAuth"`
- GoogleAuth AuthProviderConfig `form:"googleAuth" json:"googleAuth"`
- FacebookAuth AuthProviderConfig `form:"facebookAuth" json:"facebookAuth"`
- GithubAuth AuthProviderConfig `form:"githubAuth" json:"githubAuth"`
- GitlabAuth AuthProviderConfig `form:"gitlabAuth" json:"gitlabAuth"`
- DiscordAuth AuthProviderConfig `form:"discordAuth" json:"discordAuth"`
- TwitterAuth AuthProviderConfig `form:"twitterAuth" json:"twitterAuth"`
- MicrosoftAuth AuthProviderConfig `form:"microsoftAuth" json:"microsoftAuth"`
- SpotifyAuth AuthProviderConfig `form:"spotifyAuth" json:"spotifyAuth"`
- KakaoAuth AuthProviderConfig `form:"kakaoAuth" json:"kakaoAuth"`
- TwitchAuth AuthProviderConfig `form:"twitchAuth" json:"twitchAuth"`
- StravaAuth AuthProviderConfig `form:"stravaAuth" json:"stravaAuth"`
- GiteeAuth AuthProviderConfig `form:"giteeAuth" json:"giteeAuth"`
- LivechatAuth AuthProviderConfig `form:"livechatAuth" json:"livechatAuth"`
- GiteaAuth AuthProviderConfig `form:"giteaAuth" json:"giteaAuth"`
- OIDCAuth AuthProviderConfig `form:"oidcAuth" json:"oidcAuth"`
- OIDC2Auth AuthProviderConfig `form:"oidc2Auth" json:"oidc2Auth"`
- OIDC3Auth AuthProviderConfig `form:"oidc3Auth" json:"oidc3Auth"`
- AppleAuth AuthProviderConfig `form:"appleAuth" json:"appleAuth"`
- InstagramAuth AuthProviderConfig `form:"instagramAuth" json:"instagramAuth"`
- VKAuth AuthProviderConfig `form:"vkAuth" json:"vkAuth"`
- YandexAuth AuthProviderConfig `form:"yandexAuth" json:"yandexAuth"`
- PatreonAuth AuthProviderConfig `form:"patreonAuth" json:"patreonAuth"`
- MailcowAuth AuthProviderConfig `form:"mailcowAuth" json:"mailcowAuth"`
+ GoogleAuth AuthProviderConfig `form:"googleAuth" json:"googleAuth"`
+ FacebookAuth AuthProviderConfig `form:"facebookAuth" json:"facebookAuth"`
+ GithubAuth AuthProviderConfig `form:"githubAuth" json:"githubAuth"`
+ GitlabAuth AuthProviderConfig `form:"gitlabAuth" json:"gitlabAuth"`
+ DiscordAuth AuthProviderConfig `form:"discordAuth" json:"discordAuth"`
+ TwitterAuth AuthProviderConfig `form:"twitterAuth" json:"twitterAuth"`
+ MicrosoftAuth AuthProviderConfig `form:"microsoftAuth" json:"microsoftAuth"`
+ SpotifyAuth AuthProviderConfig `form:"spotifyAuth" json:"spotifyAuth"`
+ KakaoAuth AuthProviderConfig `form:"kakaoAuth" json:"kakaoAuth"`
+ TwitchAuth AuthProviderConfig `form:"twitchAuth" json:"twitchAuth"`
+ StravaAuth AuthProviderConfig `form:"stravaAuth" json:"stravaAuth"`
+ GiteeAuth AuthProviderConfig `form:"giteeAuth" json:"giteeAuth"`
+ LivechatAuth AuthProviderConfig `form:"livechatAuth" json:"livechatAuth"`
+ GiteaAuth AuthProviderConfig `form:"giteaAuth" json:"giteaAuth"`
+ OIDCAuth AuthProviderConfig `form:"oidcAuth" json:"oidcAuth"`
+ OIDC2Auth AuthProviderConfig `form:"oidc2Auth" json:"oidc2Auth"`
+ OIDC3Auth AuthProviderConfig `form:"oidc3Auth" json:"oidc3Auth"`
+ AppleAuth AuthProviderConfig `form:"appleAuth" json:"appleAuth"`
+ InstagramAuth AuthProviderConfig `form:"instagramAuth" json:"instagramAuth"`
+ VKAuth AuthProviderConfig `form:"vkAuth" json:"vkAuth"`
+ YandexAuth AuthProviderConfig `form:"yandexAuth" json:"yandexAuth"`
+ PatreonAuth AuthProviderConfig `form:"patreonAuth" json:"patreonAuth"`
+ MailcowAuth AuthProviderConfig `form:"mailcowAuth" json:"mailcowAuth"`
+ BitbucketAuth AuthProviderConfig `form:"bitbucketAuth" json:"bitbucketAuth"`
+ PlanningcenterAuth AuthProviderConfig `form:"planningcenterAuth" json:"planningcenterAuth"`
}
// New creates and returns a new default Settings instance.
@@ -196,6 +198,12 @@ func New() *Settings {
MailcowAuth: AuthProviderConfig{
Enabled: false,
},
+ BitbucketAuth: AuthProviderConfig{
+ Enabled: false,
+ },
+ PlanningcenterAuth: AuthProviderConfig{
+ Enabled: false,
+ },
}
}
@@ -241,6 +249,8 @@ func (s *Settings) Validate() error {
validation.Field(&s.YandexAuth),
validation.Field(&s.PatreonAuth),
validation.Field(&s.MailcowAuth),
+ validation.Field(&s.BitbucketAuth),
+ validation.Field(&s.PlanningcenterAuth),
)
}
@@ -309,6 +319,8 @@ func (s *Settings) RedactClone() (*Settings, error) {
&clone.YandexAuth.ClientSecret,
&clone.PatreonAuth.ClientSecret,
&clone.MailcowAuth.ClientSecret,
+ &clone.BitbucketAuth.ClientSecret,
+ &clone.PlanningcenterAuth.ClientSecret,
}
// mask all sensitive fields
@@ -328,29 +340,31 @@ func (s *Settings) NamedAuthProviderConfigs() map[string]AuthProviderConfig {
defer s.mux.RUnlock()
return map[string]AuthProviderConfig{
- auth.NameGoogle: s.GoogleAuth,
- auth.NameFacebook: s.FacebookAuth,
- auth.NameGithub: s.GithubAuth,
- auth.NameGitlab: s.GitlabAuth,
- auth.NameDiscord: s.DiscordAuth,
- auth.NameTwitter: s.TwitterAuth,
- auth.NameMicrosoft: s.MicrosoftAuth,
- auth.NameSpotify: s.SpotifyAuth,
- auth.NameKakao: s.KakaoAuth,
- auth.NameTwitch: s.TwitchAuth,
- auth.NameStrava: s.StravaAuth,
- auth.NameGitee: s.GiteeAuth,
- auth.NameLivechat: s.LivechatAuth,
- auth.NameGitea: s.GiteaAuth,
- auth.NameOIDC: s.OIDCAuth,
- auth.NameOIDC + "2": s.OIDC2Auth,
- auth.NameOIDC + "3": s.OIDC3Auth,
- auth.NameApple: s.AppleAuth,
- auth.NameInstagram: s.InstagramAuth,
- auth.NameVK: s.VKAuth,
- auth.NameYandex: s.YandexAuth,
- auth.NamePatreon: s.PatreonAuth,
- auth.NameMailcow: s.MailcowAuth,
+ auth.NameGoogle: s.GoogleAuth,
+ auth.NameFacebook: s.FacebookAuth,
+ auth.NameGithub: s.GithubAuth,
+ auth.NameGitlab: s.GitlabAuth,
+ auth.NameDiscord: s.DiscordAuth,
+ auth.NameTwitter: s.TwitterAuth,
+ auth.NameMicrosoft: s.MicrosoftAuth,
+ auth.NameSpotify: s.SpotifyAuth,
+ auth.NameKakao: s.KakaoAuth,
+ auth.NameTwitch: s.TwitchAuth,
+ auth.NameStrava: s.StravaAuth,
+ auth.NameGitee: s.GiteeAuth,
+ auth.NameLivechat: s.LivechatAuth,
+ auth.NameGitea: s.GiteaAuth,
+ auth.NameOIDC: s.OIDCAuth,
+ auth.NameOIDC + "2": s.OIDC2Auth,
+ auth.NameOIDC + "3": s.OIDC3Auth,
+ auth.NameApple: s.AppleAuth,
+ auth.NameInstagram: s.InstagramAuth,
+ auth.NameVK: s.VKAuth,
+ auth.NameYandex: s.YandexAuth,
+ auth.NamePatreon: s.PatreonAuth,
+ auth.NameMailcow: s.MailcowAuth,
+ auth.NameBitbucket: s.BitbucketAuth,
+ auth.NamePlanningcenter: s.PlanningcenterAuth,
}
}
@@ -449,7 +463,7 @@ type BackupsConfig struct {
// Leave it empty to disable the auto backups functionality.
Cron string `form:"cron" json:"cron"`
- // CronMaxKeep is the the max number of cron generated backups to
+ // CronMaxKeep is the max number of cron generated backups to
// keep before removing older entries.
//
// This field works only when the cron config has valid cron expression.
@@ -516,6 +530,7 @@ type EmailTemplate struct {
Body string `form:"body" json:"body"`
Subject string `form:"subject" json:"subject"`
ActionUrl string `form:"actionUrl" json:"actionUrl"`
+ Hidden bool `form:"hidden" json:"hidden"`
}
// Validate makes EmailTemplate validatable by implementing [validation.Validatable] interface.
@@ -638,7 +653,7 @@ func (c AuthProviderConfig) Validate() error {
// SetupProvider loads the current AuthProviderConfig into the specified provider.
func (c AuthProviderConfig) SetupProvider(provider auth.Provider) error {
if !c.Enabled {
- return errors.New("The provider is not enabled.")
+ return errors.New("the provider is not enabled")
}
if c.ClientId != "" {
diff --git a/models/settings/settings_test.go b/models/settings/settings_test.go
index 2a37ce19b..0680a7b84 100644
--- a/models/settings/settings_test.go
+++ b/models/settings/settings_test.go
@@ -78,6 +78,10 @@ func TestSettingsValidate(t *testing.T) {
s.PatreonAuth.ClientId = ""
s.MailcowAuth.Enabled = true
s.MailcowAuth.ClientId = ""
+ s.BitbucketAuth.Enabled = true
+ s.BitbucketAuth.ClientId = ""
+ s.PlanningcenterAuth.Enabled = true
+ s.PlanningcenterAuth.ClientId = ""
// check if Validate() is triggering the members validate methods.
err := s.Validate()
@@ -121,6 +125,8 @@ func TestSettingsValidate(t *testing.T) {
`"yandexAuth":{`,
`"patreonAuth":{`,
`"mailcowAuth":{`,
+ `"bitbucketAuth":{`,
+ `"planningcenterAuth":{`,
}
errBytes, _ := json.Marshal(err)
@@ -198,6 +204,10 @@ func TestSettingsMerge(t *testing.T) {
s2.PatreonAuth.ClientId = "patreon_test"
s2.MailcowAuth.Enabled = true
s2.MailcowAuth.ClientId = "mailcow_test"
+ s2.BitbucketAuth.Enabled = true
+ s2.BitbucketAuth.ClientId = "bitbucket_test"
+ s2.PlanningcenterAuth.Enabled = true
+ s2.PlanningcenterAuth.ClientId = "planningcenter_test"
if err := s1.Merge(s2); err != nil {
t.Fatal(err)
@@ -290,6 +300,8 @@ func TestSettingsRedactClone(t *testing.T) {
s1.YandexAuth.ClientSecret = testSecret
s1.PatreonAuth.ClientSecret = testSecret
s1.MailcowAuth.ClientSecret = testSecret
+ s1.BitbucketAuth.ClientSecret = testSecret
+ s1.PlanningcenterAuth.ClientSecret = testSecret
s1Bytes, err := json.Marshal(s1)
if err != nil {
@@ -350,6 +362,8 @@ func TestNamedAuthProviderConfigs(t *testing.T) {
s.YandexAuth.ClientId = "yandex_test"
s.PatreonAuth.ClientId = "patreon_test"
s.MailcowAuth.ClientId = "mailcow_test"
+ s.BitbucketAuth.ClientId = "bitbucket_test"
+ s.PlanningcenterAuth.ClientId = "planningcenter_test"
result := s.NamedAuthProviderConfigs()
@@ -383,6 +397,8 @@ func TestNamedAuthProviderConfigs(t *testing.T) {
`"yandex":{"enabled":false,"clientId":"yandex_test"`,
`"patreon":{"enabled":false,"clientId":"patreon_test"`,
`"mailcow":{"enabled":false,"clientId":"mailcow_test"`,
+ `"bitbucket":{"enabled":false,"clientId":"bitbucket_test"`,
+ `"planningcenter":{"enabled":false,"clientId":"planningcenter_test"`,
}
for _, p := range expectedParts {
if !strings.Contains(encodedStr, p) {
diff --git a/plugins/ghupdate/ghupdate.go b/plugins/ghupdate/ghupdate.go
index 222e18184..42feb7668 100644
--- a/plugins/ghupdate/ghupdate.go
+++ b/plugins/ghupdate/ghupdate.go
@@ -97,8 +97,8 @@ func Register(app core.App, rootCmd *cobra.Command, config Config) error {
type plugin struct {
app core.App
- currentVersion string
config Config
+ currentVersion string
}
func (p *plugin) updateCmd() *cobra.Command {
@@ -106,7 +106,7 @@ func (p *plugin) updateCmd() *cobra.Command {
command := &cobra.Command{
Use: "update",
- Short: "Automatically updates the current PocketBase executable with the latest available version",
+ Short: "Automatically updates the current app executable with the latest available version",
SilenceUsage: true,
RunE: func(command *cobra.Command, args []string) error {
var needConfirm bool
@@ -160,7 +160,7 @@ func (p *plugin) update(withBackup bool) error {
}
if compareVersions(strings.TrimPrefix(p.currentVersion, "v"), strings.TrimPrefix(latest.Tag, "v")) <= 0 {
- color.Green("You already have the latest PocketBase %s.", p.currentVersion)
+ color.Green("You already have the latest version %s.", p.currentVersion)
return nil
}
@@ -252,7 +252,7 @@ func (p *plugin) update(withBackup bool) error {
fmt.Print("\n")
color.Cyan("Here is a list with some of the %s changes:", latest.Tag)
// remove the update command note to avoid "stuttering"
- releaseNotes := strings.TrimSpace(strings.Replace(latest.Body, "> _To update the prebuilt executable you can run `./pocketbase update`._", "", 1))
+ releaseNotes := strings.TrimSpace(strings.Replace(latest.Body, "> _To update the prebuilt executable you can run `./"+p.config.ArchiveExecutable+" update`._", "", 1))
color.Cyan(releaseNotes)
fmt.Print("\n")
}
diff --git a/plugins/ghupdate/ghupdate_test.go b/plugins/ghupdate/ghupdate_test.go
index d13bbdccb..fc6bb97cc 100644
--- a/plugins/ghupdate/ghupdate_test.go
+++ b/plugins/ghupdate/ghupdate_test.go
@@ -21,6 +21,7 @@ func TestCompareVersions(t *testing.T) {
{"0.0.2", "0.0.1", -1},
{"0.16.2", "0.17.0", 1},
{"1.15.0", "0.16.1", -1},
+ {"1.2.9", "1.2.10", 1},
{"3.2", "4.0", 1},
{"3.2.4", "3.2.3", -1},
}
diff --git a/plugins/jsvm/binds.go b/plugins/jsvm/binds.go
index 906d3dbda..0a2aab592 100644
--- a/plugins/jsvm/binds.go
+++ b/plugins/jsvm/binds.go
@@ -36,7 +36,9 @@ import (
"github.com/dop251/goja"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/labstack/echo/v5"
+ "github.com/labstack/echo/v5/middleware"
"github.com/pocketbase/dbx"
+ "github.com/spf13/cast"
"github.com/spf13/cobra"
)
@@ -121,7 +123,7 @@ func cronBinds(app core.App, loader *goja.Runtime, executors *vmsPool) {
})
if err != nil {
- app.Logger().Debug(
+ app.Logger().Error(
"[cronAdd] failed to execute cron job",
slog.String("jobId", jobId),
slog.String("error", err.Error()),
@@ -375,7 +377,7 @@ func baseBinds(vm *goja.Runtime) {
})
vm.Set("RequestInfo", func(call goja.ConstructorCall) *goja.Object {
- instance := &models.RequestInfo{}
+ instance := &models.RequestInfo{Context: models.RequestInfoContextDefault}
return structConstructor(vm, call, instance)
})
@@ -533,6 +535,16 @@ func filesystemBinds(vm *goja.Runtime) {
obj.Set("fileFromPath", filesystem.NewFileFromPath)
obj.Set("fileFromBytes", filesystem.NewFileFromBytes)
obj.Set("fileFromMultipart", filesystem.NewFileFromMultipart)
+ obj.Set("fileFromUrl", func(url string, secTimeout int) (*filesystem.File, error) {
+ if secTimeout == 0 {
+ secTimeout = 120
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Duration(secTimeout)*time.Second)
+ defer cancel()
+
+ return filesystem.NewFileFromUrl(ctx, url)
+ })
}
func filepathBinds(vm *goja.Runtime) {
@@ -618,6 +630,8 @@ func apisBinds(vm *goja.Runtime) {
obj.Set("requireAdminOrRecordAuth", apis.RequireAdminOrRecordAuth)
obj.Set("requireAdminOrOwnerAuth", apis.RequireAdminOrOwnerAuth)
obj.Set("activityLogger", apis.ActivityLogger)
+ obj.Set("gzip", middleware.Gzip)
+ obj.Set("bodyLimit", middleware.BodyLimit)
// record helpers
obj.Set("requestInfo", apis.RequestInfo)
@@ -637,34 +651,61 @@ func httpClientBinds(vm *goja.Runtime) {
obj := vm.NewObject()
vm.Set("$http", obj)
+ vm.Set("FormData", func(call goja.ConstructorCall) *goja.Object {
+ instance := FormData{}
+
+ instanceValue := vm.ToValue(instance).(*goja.Object)
+ instanceValue.SetPrototype(call.This.Prototype())
+
+ return instanceValue
+ })
+
type sendResult struct {
- StatusCode int `json:"statusCode"`
+ Json any `json:"json"`
Headers map[string][]string `json:"headers"`
Cookies map[string]*http.Cookie `json:"cookies"`
Raw string `json:"raw"`
- Json any `json:"json"`
+ StatusCode int `json:"statusCode"`
}
type sendConfig struct {
+ // Deprecated: consider using Body instead
+ Data map[string]any
+
+ Body any // raw string or FormData
+ Headers map[string]string
Method string
Url string
- Body string
- Headers map[string]string
- Timeout int // seconds (default to 120)
- Data map[string]any // deprecated, consider using Body instead
+ Timeout int // seconds (default to 120)
}
obj.Set("send", func(params map[string]any) (*sendResult, error) {
- rawParams, err := json.Marshal(params)
- if err != nil {
- return nil, err
- }
-
config := sendConfig{
Method: "GET",
}
- if err := json.Unmarshal(rawParams, &config); err != nil {
- return nil, err
+
+ if v, ok := params["data"]; ok {
+ config.Data = cast.ToStringMap(v)
+ }
+
+ if v, ok := params["body"]; ok {
+ config.Body = v
+ }
+
+ if v, ok := params["headers"]; ok {
+ config.Headers = cast.ToStringMapString(v)
+ }
+
+ if v, ok := params["method"]; ok {
+ config.Method = cast.ToString(v)
+ }
+
+ if v, ok := params["url"]; ok {
+ config.Url = cast.ToString(v)
+ }
+
+ if v, ok := params["timeout"]; ok {
+ config.Timeout = cast.ToInt(v)
}
if config.Timeout <= 0 {
@@ -675,6 +716,7 @@ func httpClientBinds(vm *goja.Runtime) {
defer cancel()
var reqBody io.Reader
+ var contentType string
// legacy json body data
if len(config.Data) != 0 {
@@ -683,10 +725,19 @@ func httpClientBinds(vm *goja.Runtime) {
return nil, err
}
reqBody = bytes.NewReader(encoded)
- }
+ } else {
+ switch v := config.Body.(type) {
+ case FormData:
+ body, mp, err := v.toMultipart()
+ if err != nil {
+ return nil, err
+ }
- if config.Body != "" {
- reqBody = strings.NewReader(config.Body)
+ reqBody = body
+ contentType = mp.FormDataContentType()
+ default:
+ reqBody = strings.NewReader(cast.ToString(config.Body))
+ }
}
req, err := http.NewRequestWithContext(ctx, strings.ToUpper(config.Method), config.Url, reqBody)
@@ -698,7 +749,15 @@ func httpClientBinds(vm *goja.Runtime) {
req.Header.Add(k, v)
}
- // set default content-type header (if missing)
+ // set the explicit content type
+ // (overwriting the user provided header value if any)
+ if contentType != "" {
+ req.Header.Set("content-type", contentType)
+ }
+
+ // @todo consider removing during the refactoring
+ //
+ // fallback to json content-type
if req.Header.Get("content-type") == "" {
req.Header.Set("content-type", "application/json")
}
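
Why the new branch overwrites any user supplied `content-type` for `FormData` bodies: the multipart writer generates a random boundary that must appear both in the body and in the header value, so only the writer's own content type is valid. A standalone stdlib sketch of that flow (the URL is a placeholder):

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	body := new(bytes.Buffer)

	mp := multipart.NewWriter(body)
	_ = mp.WriteField("title", "123")
	_ = mp.Close() // writes the trailing boundary

	req, err := http.NewRequest("POST", "https://example.com", body)
	if err != nil {
		panic(err)
	}

	// the boundary is part of the header value, so a hardcoded
	// content-type would not match the generated body
	req.Header.Set("content-type", mp.FormDataContentType())

	fmt.Println(req.Header.Get("content-type"))
	// e.g. multipart/form-data; boundary=8a2cbd7e...
}
```
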
diff --git a/plugins/jsvm/binds_test.go b/plugins/jsvm/binds_test.go
index 12c437fe4..42d691729 100644
--- a/plugins/jsvm/binds_test.go
+++ b/plugins/jsvm/binds_test.go
@@ -2,6 +2,7 @@ package jsvm
import (
"encoding/json"
+ "fmt"
"io"
"mime/multipart"
"net/http"
@@ -890,13 +891,23 @@ func TestFilesystemBinds(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/error" {
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+
+ fmt.Fprintf(w, "test")
+ }))
+ defer srv.Close()
+
vm := goja.New()
vm.Set("mh", &multipart.FileHeader{Filename: "test"})
vm.Set("testFile", filepath.Join(app.DataDir(), "data.db"))
+ vm.Set("baseUrl", srv.URL)
baseBinds(vm)
filesystemBinds(vm)
- testBindsCount(vm, "$filesystem", 3, t)
+ testBindsCount(vm, "$filesystem", 4, t)
// fileFromPath
{
@@ -939,6 +950,28 @@ func TestFilesystemBinds(t *testing.T) {
t.Fatalf("[fileFromMultipart] Expected file with name %q, got %v", file.OriginalName, file)
}
}
+
+ // fileFromUrl (success)
+ {
+ v, err := vm.RunString(`$filesystem.fileFromUrl(baseUrl + "/test")`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ file, _ := v.Export().(*filesystem.File)
+
+ if file == nil || file.OriginalName != "test" {
+ t.Fatalf("[fileFromUrl] Expected file with name %q, got %v", file.OriginalName, file)
+ }
+ }
+
+ // fileFromUrl (failure)
+ {
+ _, err := vm.RunString(`$filesystem.fileFromUrl(baseUrl + "/error")`)
+ if err == nil {
+ t.Fatal("Expected url fetch error")
+ }
+ }
}
func TestFormsBinds(t *testing.T) {
@@ -956,7 +989,7 @@ func TestApisBindsCount(t *testing.T) {
apisBinds(vm)
testBindsCount(vm, "this", 6, t)
- testBindsCount(vm, "$apis", 12, t)
+ testBindsCount(vm, "$apis", 14, t)
}
func TestApisBindsApiError(t *testing.T) {
@@ -1121,6 +1154,7 @@ func TestHttpClientBindsCount(t *testing.T) {
vm := goja.New()
httpClientBinds(vm)
+ testBindsCount(vm, "this", 2, t) // + FormData
testBindsCount(vm, "$http", 1, t)
}
@@ -1223,6 +1257,15 @@ func TestHttpClientBindsSend(t *testing.T) {
headers: {"content-type": "text/plain"},
})
+ // with FormData
+ const formData = new FormData()
+ formData.append("title", "123")
+ const test3 = $http.send({
+ url: testUrl,
+ body: formData,
+ headers: {"content-type": "text/plain"}, // should be ignored
+ })
+
const scenarios = [
[test0, {
"statusCode": "400",
@@ -1244,6 +1287,18 @@ func TestHttpClientBindsSend(t *testing.T) {
"json.method": "GET",
"json.headers.content_type": "text/plain",
}],
+ [test3, {
+ "statusCode": "200",
+ "headers.X-Custom.0": "custom_header",
+ "cookies.sessionId.value": "123456",
+ "json.method": "GET",
+ "json.body": [
+ "\r\nContent-Disposition: form-data; name=\"title\"\r\n\r\n123\r\n--",
+ ],
+ "json.headers.content_type": [
+ "multipart/form-data; boundary="
+ ],
+ }],
]
for (let scenario of scenarios) {
@@ -1251,8 +1306,20 @@ func TestHttpClientBindsSend(t *testing.T) {
const expectations = scenario[1];
for (let key in expectations) {
- if (getNestedVal(result, key) != expectations[key]) {
- throw new Error('Expected ' + key + ' ' + expectations[key] + ', got: ' + result.raw);
+ const value = getNestedVal(result, key);
+ const expectation = expectations[key]
+ if (Array.isArray(expectation)) {
+ // check for partial match(es)
+ for (let exp of expectation) {
+ if (!value.includes(exp)) {
+ throw new Error('Expected ' + key + ' to contain ' + exp + ', got: ' + result.raw);
+ }
+ }
+ } else {
+ // check for direct match
+ if (value != expectation) {
+ throw new Error('Expected ' + key + ' ' + expectation + ', got: ' + result.raw);
+ }
}
}
}
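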
diff --git a/plugins/jsvm/form_data.go b/plugins/jsvm/form_data.go
new file mode 100644
index 000000000..c7b76afcd
--- /dev/null
+++ b/plugins/jsvm/form_data.go
@@ -0,0 +1,149 @@
+package jsvm
+
+import (
+ "bytes"
+ "io"
+ "mime/multipart"
+
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/filesystem"
+ "github.com/spf13/cast"
+)
+
+// FormData represents an interface similar to the browser's [FormData].
+//
+// The value of each FormData entry must be a string or [*filesystem.File] instance.
+//
+// It is intended to be used together with the JSVM `$http.send` when
+// sending multipart/form-data requests.
+//
+// [FormData]: https://developer.mozilla.org/en-US/docs/Web/API/FormData.
+type FormData map[string][]any
+
+// Append appends a new value onto an existing key inside the current FormData,
+// or adds the key if it does not already exist.
+func (data FormData) Append(key string, value any) {
+ data[key] = append(data[key], value)
+}
+
+// Set sets a new value for an existing key inside the current FormData,
+// or adds the key/value if it does not already exist.
+func (data FormData) Set(key string, value any) {
+ data[key] = []any{value}
+}
+
+// Delete deletes a key and its value(s) from the current FormData.
+func (data FormData) Delete(key string) {
+ delete(data, key)
+}
+
+// Get returns the first value associated with a given key from
+// within the current FormData.
+//
+// If you expect multiple values and want all of them,
+// use the [FormData.GetAll] method instead.
+func (data FormData) Get(key string) any {
+ values, ok := data[key]
+ if !ok || len(values) == 0 {
+ return nil
+ }
+
+ return values[0]
+}
+
+// GetAll returns all the values associated with a given key
+// from within the current FormData.
+func (data FormData) GetAll(key string) []any {
+ values, ok := data[key]
+ if !ok {
+ return nil
+ }
+
+ return values
+}
+
+// Has returns whether a FormData object contains a certain key.
+func (data FormData) Has(key string) bool {
+ values, ok := data[key]
+
+ return ok && len(values) > 0
+}
+
+// Keys returns all keys contained in the current FormData.
+func (data FormData) Keys() []string {
+ result := make([]string, 0, len(data))
+
+ for k := range data {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+// Values returns all values contained in the current FormData.
+func (data FormData) Values() []any {
+ result := make([]any, 0, len(data))
+
+ for _, values := range data {
+ for _, v := range values {
+ result = append(result, v)
+ }
+ }
+
+ return result
+}
+
+// Entries returns a [key, value] slice pair for each FormData entry.
+func (data FormData) Entries() [][]any {
+ result := make([][]any, 0, len(data))
+
+ for k, values := range data {
+ for _, v := range values {
+ result = append(result, []any{k, v})
+ }
+ }
+
+ return result
+}
+
+// toMultipart converts the current FormData entries into multipart encoded data.
+func (data FormData) toMultipart() (*bytes.Buffer, *multipart.Writer, error) {
+ body := new(bytes.Buffer)
+
+ mp := multipart.NewWriter(body)
+ defer mp.Close()
+
+ for k, values := range data {
+ for _, rawValue := range values {
+ switch v := rawValue.(type) {
+ case *filesystem.File:
+ err := func() error {
+ mpw, err := mp.CreateFormFile(k, v.OriginalName)
+ if err != nil {
+ return err
+ }
+
+ file, err := v.Reader.Open()
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ if _, err := io.Copy(mpw, file); err != nil {
+ return err
+ }
+
+ return nil
+ }()
+ if err != nil {
+ return nil, nil, err
+ }
+ default:
+ if err := mp.WriteField(k, cast.ToString(v)); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+ }
+
+ return body, mp, nil
+}
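
A brief Go usage sketch of the type above, illustrating the append vs. set semantics (the import path mirrors the repository layout and is assumed for illustration):

```go
package main

import (
	"fmt"

	"github.com/AlperRehaYAZGAN/postgresbase/plugins/jsvm"
)

func main() {
	data := jsvm.FormData{}

	data.Append("tags", "a")
	data.Append("tags", "b")   // keeps both values
	data.Set("title", "hello")
	data.Set("title", "world") // replaces the previous value

	fmt.Println(data.Get("title"))   // world
	fmt.Println(data.GetAll("tags")) // [a b]
	fmt.Println(data.Has("missing")) // false
	fmt.Println(len(data.Entries())) // 3
}
```
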
diff --git a/plugins/jsvm/form_data_test.go b/plugins/jsvm/form_data_test.go
new file mode 100644
index 000000000..91fb39486
--- /dev/null
+++ b/plugins/jsvm/form_data_test.go
@@ -0,0 +1,225 @@
+package jsvm
+
+import (
+ "bytes"
+ "encoding/json"
+ "strings"
+ "testing"
+
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/filesystem"
+ "github.com/AlperRehaYAZGAN/postgresbase/tools/list"
+)
+
+func TestFormDataAppendAndSet(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+
+ data.Append("a", 1)
+ data.Append("a", 2)
+
+ data.Append("b", 3)
+ data.Append("b", 4)
+ data.Set("b", 5) // should overwrite the previous 2 calls
+
+ data.Set("c", 6)
+ data.Set("c", 7)
+
+ if len(data["a"]) != 2 {
+ t.Fatalf("Expected 2 'a' values, got %v", data["a"])
+ }
+ if data["a"][0] != 1 || data["a"][1] != 2 {
+ t.Fatalf("Expected 1 and 2 'a' key values, got %v", data["a"])
+ }
+
+ if len(data["b"]) != 1 {
+ t.Fatalf("Expected 1 'b' values, got %v", data["b"])
+ }
+ if data["b"][0] != 5 {
+ t.Fatalf("Expected 5 as 'b' key value, got %v", data["b"])
+ }
+
+ if len(data["c"]) != 1 {
+ t.Fatalf("Expected 1 'c' values, got %v", data["c"])
+ }
+ if data["c"][0] != 7 {
+ t.Fatalf("Expected 7 as 'c' key value, got %v", data["c"])
+ }
+}
+
+func TestFormDataDelete(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+ data.Append("a", 1)
+ data.Append("a", 2)
+ data.Append("b", 3)
+
+ data.Delete("missing") // should do nothing
+ data.Delete("a")
+
+ if len(data) != 1 {
+ t.Fatalf("Expected exactly 1 data remaining key, got %v", data)
+ }
+
+ if data["b"][0] != 3 {
+ t.Fatalf("Expected 3 as 'b' key value, got %v", data["b"])
+ }
+}
+
+func TestFormDataGet(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+ data.Append("a", 1)
+ data.Append("a", 2)
+
+ if v := data.Get("missing"); v != nil {
+ t.Fatalf("Expected %v for key 'missing', got %v", nil, v)
+ }
+
+ if v := data.Get("a"); v != 1 {
+ t.Fatalf("Expected %v for key 'a', got %v", 1, v)
+ }
+}
+
+func TestFormDataGetAll(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+ data.Append("a", 1)
+ data.Append("a", 2)
+
+ if v := data.GetAll("missing"); v != nil {
+ t.Fatalf("Expected %v for key 'a', got %v", nil, v)
+ }
+
+ values := data.GetAll("a")
+ if len(values) != 2 || values[0] != 1 || values[1] != 2 {
+ t.Fatalf("Expected 1 and 2 values, got %v", values)
+ }
+}
+
+func TestFormDataHas(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+ data.Append("a", 1)
+
+ if v := data.Has("missing"); v {
+ t.Fatalf("Expected key 'missing' to not exist: %v", v)
+ }
+
+ if v := data.Has("a"); !v {
+ t.Fatalf("Expected key 'a' to exist: %v", v)
+ }
+}
+
+func TestFormDataKeys(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+ data.Append("a", 1)
+ data.Append("b", 1)
+ data.Append("c", 1)
+ data.Append("a", 1)
+
+ keys := data.Keys()
+
+ expectedKeys := []string{"a", "b", "c"}
+
+ for _, expected := range expectedKeys {
+ if !list.ExistInSlice(expected, keys) {
+ t.Fatalf("Expected key %s to exists in %v", expected, keys)
+ }
+ }
+}
+
+func TestFormDataValues(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+ data.Append("a", 1)
+ data.Append("b", 2)
+ data.Append("c", 3)
+ data.Append("a", 4)
+
+ values := data.Values()
+
+ expectedValues := []any{1, 2, 3, 4}
+
+ for _, expected := range expectedValues {
+ if !list.ExistInSlice(expected, values) {
+ t.Fatalf("Expected value %v to exist in %v", expected, values)
+ }
+ }
+}
+
+func TestFormDataEntries(t *testing.T) {
+ t.Parallel()
+
+ data := FormData{}
+ data.Append("a", 1)
+ data.Append("b", 2)
+ data.Append("c", 3)
+ data.Append("a", 4)
+
+ entries := data.Entries()
+
+ rawEntries, err := json.Marshal(entries)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(entries) != 4 {
+ t.Fatalf("Expected 4 entries")
+ }
+
+ expectedEntries := []string{`["a",1]`, `["a",4]`, `["b",2]`, `["c",3]`}
+ for _, expected := range expectedEntries {
+ if !bytes.Contains(rawEntries, []byte(expected)) {
+ t.Fatalf("Expected entry %s to exists in %s", expected, rawEntries)
+ }
+ }
+}
+
+func TestFormDataToMultipart(t *testing.T) {
+ t.Parallel()
+
+ f, err := filesystem.NewFileFromBytes([]byte("abc"), "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := FormData{}
+ data.Append("a", 1) // should be casted
+ data.Append("b", "test1")
+ data.Append("b", "test2")
+ data.Append("c", f)
+
+ body, mp, err := data.toMultipart()
+ if err != nil {
+ t.Fatal(err)
+ }
+ bodyStr := body.String()
+
+ // content type checks
+ contentType := mp.FormDataContentType()
+ expectedContentType := "multipart/form-data; boundary="
+ if !strings.Contains(contentType, expectedContentType) {
+ t.Fatalf("Expected to find content-type %s in %s", expectedContentType, contentType)
+ }
+
+ // body checks
+ expectedBodyParts := []string{
+ "Content-Disposition: form-data; name=\"a\"\r\n\r\n1",
+ "Content-Disposition: form-data; name=\"b\"\r\n\r\ntest1",
+ "Content-Disposition: form-data; name=\"b\"\r\n\r\ntest2",
+ "Content-Disposition: form-data; name=\"c\"; filename=\"test\"\r\nContent-Type: application/octet-stream\r\n\r\nabc",
+ }
+ for _, part := range expectedBodyParts {
+ if !strings.Contains(bodyStr, part) {
+ t.Fatalf("Expected to find %s in body\n%s", part, bodyStr)
+ }
+ }
+}
diff --git a/plugins/jsvm/internal/types/generated/types.d.ts b/plugins/jsvm/internal/types/generated/types.d.ts
index 68e0ae2a5..dac56c617 100644
--- a/plugins/jsvm/internal/types/generated/types.d.ts
+++ b/plugins/jsvm/internal/types/generated/types.d.ts
@@ -1,4 +1,4 @@
-// 1704272575
+// 1710682789
// GENERATED CODE - DO NOT MODIFY BY HAND
// -------------------------------------------------------------------
@@ -80,7 +80,7 @@ declare function routerAdd(
* ```js
* routerUse((next) => {
* return (c) => {
- * console.log(c.Path())
+ * console.log(c.path())
* return next(c)
* }
* })
@@ -625,6 +625,22 @@ declare namespace $filesystem {
let fileFromPath: filesystem.newFileFromPath
let fileFromBytes: filesystem.newFileFromBytes
let fileFromMultipart: filesystem.newFileFromMultipart
+
+ /**
+ * fileFromUrl creates a new File from the provided url by
+ * downloading the resource and creating a BytesReader.
+ *
+ * Example:
+ *
+ * ```js
+ * // with default max timeout of 120sec
+ * const file1 = $filesystem.fileFromUrl("https://...")
+ *
+ * // with custom timeout of 15sec
+ * const file2 = $filesystem.fileFromUrl("https://...", 15)
+ * ```
+ */
+ export function fileFromUrl(url: string, secTimeout?: number): filesystem.File
}
// -------------------------------------------------------------------
@@ -962,6 +978,8 @@ declare namespace $apis {
let activityLogger: apis.activityLogger
let requestInfo: apis.requestInfo
let recordAuthResponse: apis.recordAuthResponse
+ let gzip: middleware.gzip
+ let bodyLimit: middleware.bodyLimit
let enrichRecord: apis.enrichRecord
let enrichRecords: apis.enrichRecords
}
@@ -970,6 +988,12 @@ declare namespace $apis {
// httpClientBinds
// -------------------------------------------------------------------
+// extra FormData overload to prevent TS warnings when used with non File/Blob value.
+interface FormData {
+ append(key:string, value:any): void
+ set(key:string, value:any): void
+}
+
/**
* `$http` defines common methods for working with HTTP requests.
*
@@ -984,7 +1008,7 @@ declare namespace $http {
* ```js
* const res = $http.send({
* url: "https://example.com",
- * data: {"title": "test"}
+ * body: JSON.stringify({"title": "test"})
* method: "post",
* })
*
@@ -997,7 +1021,7 @@ declare namespace $http {
*/
function send(config: {
url: string,
- body?: string,
+ body?: string|FormData,
method?: string, // default to "GET"
headers?: { [key:string]: string },
timeout?: number, // default to 120
@@ -1279,7 +1303,7 @@ namespace os {
* Setenv sets the value of the environment variable named by the key.
* It returns an error, if any.
*/
- (key: string): void
+ (key: string, value: string): void
}
interface unsetenv {
/**
@@ -1638,18 +1662,25 @@ namespace os {
readFrom(r: io.Reader): number
}
/**
- * fileWithoutReadFrom implements all the methods of *File other
- * than ReadFrom. This is used to permit ReadFrom to call io.Copy
- * without leading to a recursive call to ReadFrom.
+ * noReadFrom can be embedded alongside another type to
+ * hide the ReadFrom method of that other type.
*/
- type _subgUszO = File
- interface fileWithoutReadFrom extends _subgUszO {
+ interface noReadFrom {
}
- interface fileWithoutReadFrom {
+ interface noReadFrom {
/**
- * This ReadFrom method hides the *File ReadFrom method.
+ * ReadFrom hides another ReadFrom method.
+ * It should never be called.
*/
- readFrom(_arg0: fileWithoutReadFrom): void
+ readFrom(_arg0: io.Reader): number
+ }
+ /**
+ * fileWithoutReadFrom implements all the methods of *File other
+ * than ReadFrom. This is used to permit ReadFrom to call io.Copy
+ * without leading to a recursive call to ReadFrom.
+ */
+ type _subezgYh = noReadFrom&File
+ interface fileWithoutReadFrom extends _subezgYh {
}
interface File {
/**
@@ -1669,6 +1700,33 @@ namespace os {
*/
writeAt(b: string|Array, off: number): number
}
+ interface File {
+ /**
+ * WriteTo implements io.WriterTo.
+ */
+ writeTo(w: io.Writer): number
+ }
+ /**
+ * noWriteTo can be embedded alongside another type to
+ * hide the WriteTo method of that other type.
+ */
+ interface noWriteTo {
+ }
+ interface noWriteTo {
+ /**
+ * WriteTo hides another WriteTo method.
+ * It should never be called.
+ */
+ writeTo(_arg0: io.Writer): number
+ }
+ /**
+ * fileWithoutWriteTo implements all the methods of *File other
+ * than WriteTo. This is used to permit WriteTo to call io.Copy
+ * without leading to a recursive call to WriteTo.
+ */
+ type _subJsbVf = noWriteTo&File
+ interface fileWithoutWriteTo extends _subJsbVf {
+ }
interface File {
/**
* Seek sets the offset for the next Read or Write on file to offset, interpreted
@@ -1739,7 +1797,17 @@ namespace os {
* Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
* If there is an error, it will be of type *LinkError.
*/
- (oldpath: string): void
+ (oldpath: string, newpath: string): void
+ }
+ interface readlink {
+ /**
+ * Readlink returns the destination of the named symbolic link.
+ * If there is an error, it will be of type *PathError.
+ *
+ * If the link destination is relative, Readlink returns the relative path
+ * without resolving it to an absolute one.
+ */
+ (name: string): string
}
interface tempDir {
/**
@@ -1971,7 +2039,7 @@ namespace os {
* On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
* EPLAN9 error, wrapped in *PathError.
*/
- (name: string, uid: number): void
+ (name: string, uid: number, gid: number): void
}
interface lchown {
/**
@@ -1982,7 +2050,7 @@ namespace os {
* On Windows, it always returns the syscall.EWINDOWS error, wrapped
* in *PathError.
*/
- (name: string, uid: number): void
+ (name: string, uid: number, gid: number): void
}
interface File {
/**
@@ -1992,7 +2060,7 @@ namespace os {
* On Windows, it always returns the syscall.EWINDOWS error, wrapped
* in *PathError.
*/
- chown(uid: number): void
+ chown(uid: number, gid: number): void
}
interface File {
/**
@@ -2093,7 +2161,7 @@ namespace os {
* Link creates newname as a hard link to the oldname file.
* If there is an error, it will be of type *LinkError.
*/
- (oldname: string): void
+ (oldname: string, newname: string): void
}
interface symlink {
/**
@@ -2102,14 +2170,7 @@ namespace os {
* if oldname is later created as a directory the symlink will not work.
* If there is an error, it will be of type *LinkError.
*/
- (oldname: string): void
- }
- interface readlink {
- /**
- * Readlink returns the destination of the named symbolic link.
- * If there is an error, it will be of type *PathError.
- */
- (name: string): string
+ (oldname: string, newname: string): void
}
interface unixDirent {
}
@@ -2250,6 +2311,10 @@ namespace os {
* If the file is a symbolic link, the returned FileInfo
* describes the symbolic link. Lstat makes no attempt to follow the link.
* If there is an error, it will be of type *PathError.
+ *
+ * On Windows, if the file is a reparse point that is a surrogate for another
+ * named entity (such as a symbolic link or mounted folder), the returned
+ * FileInfo describes the reparse point, and makes no attempt to resolve it.
*/
(name: string): FileInfo
}
@@ -2277,7 +2342,7 @@ namespace os {
* The caller can use the file's Name method to find the pathname of the file.
* It is the caller's responsibility to remove the file when it is no longer needed.
*/
- (dir: string): (File)
+ (dir: string, pattern: string): (File)
}
interface mkdirTemp {
/**
@@ -2289,7 +2354,7 @@ namespace os {
* Multiple programs or goroutines calling MkdirTemp simultaneously will not choose the same directory.
* It is the caller's responsibility to remove the directory when it is no longer needed.
*/
- (dir: string): string
+ (dir: string, pattern: string): string
}
interface getpagesize {
/**
@@ -2300,8 +2365,8 @@ namespace os {
/**
* File represents an open file descriptor.
*/
- type _subGXkEd = file
- interface File extends _subGXkEd {
+ type _subzNURo = file
+ interface File extends _subzNURo {
}
/**
* A FileInfo describes a file and is returned by Stat and Lstat.
@@ -2330,7 +2395,7 @@ namespace os {
* SameFile only applies to results returned by this package's Stat.
* It returns false in other cases.
*/
- (fi1: FileInfo): boolean
+ (fi1: FileInfo, fi2: FileInfo): boolean
}
/**
* A fileStat is the implementation of FileInfo returned by Stat and Lstat.
@@ -2384,23 +2449,23 @@ namespace filepath {
* ```
*
* Match requires pattern to match all of name, not just a substring.
- * The only possible returned error is ErrBadPattern, when pattern
+ * The only possible returned error is [ErrBadPattern], when pattern
* is malformed.
*
* On Windows, escaping is disabled. Instead, '\\' is treated as
* path separator.
*/
- (pattern: string): boolean
+ (pattern: string, name: string): boolean
}
interface glob {
/**
* Glob returns the names of all files matching pattern or nil
* if there is no matching file. The syntax of patterns is the same
- * as in Match. The pattern may describe hierarchical names such as
- * /usr/*\/bin/ed (assuming the Separator is '/').
+ * as in [Match]. The pattern may describe hierarchical names such as
+ * /usr/*\/bin/ed (assuming the [Separator] is '/').
*
* Glob ignores file system errors such as I/O errors reading directories.
- * The only possible returned error is ErrBadPattern, when pattern
+ * The only possible returned error is [ErrBadPattern], when pattern
* is malformed.
*/
(pattern: string): Array
@@ -2419,7 +2484,7 @@ namespace filepath {
* by purely lexical processing. It applies the following rules
* iteratively until no further processing can be done:
*
- * 1. Replace multiple Separator elements with a single one.
+ * 1. Replace multiple [Separator] elements with a single one.
* 2. Eliminate each . path name element (the current directory).
* 3. Eliminate each inner .. path name element (the parent directory)
* ```
@@ -2488,7 +2553,7 @@ namespace filepath {
}
interface splitList {
/**
- * SplitList splits a list of paths joined by the OS-specific ListSeparator,
+ * SplitList splits a list of paths joined by the OS-specific [ListSeparator],
* usually found in PATH or GOPATH environment variables.
* Unlike strings.Split, SplitList returns an empty slice when passed an empty
* string.
@@ -2497,7 +2562,7 @@ namespace filepath {
}
interface split {
/**
- * Split splits path immediately following the final Separator,
+ * Split splits path immediately following the final [Separator],
* separating it into a directory and file name component.
* If there is no Separator in path, Split returns an empty dir
* and file set to path.
@@ -2508,7 +2573,7 @@ namespace filepath {
interface join {
/**
* Join joins any number of path elements into a single path,
- * separating them with an OS specific Separator. Empty elements
+ * separating them with an OS specific [Separator]. Empty elements
* are ignored. The result is Cleaned. However, if the argument
* list is empty or all its elements are empty, Join returns
* an empty string.
@@ -2532,7 +2597,7 @@ namespace filepath {
* links.
* If path is relative the result will be relative to the current directory,
* unless one of the components is an absolute symbolic link.
- * EvalSymlinks calls Clean on the result.
+ * EvalSymlinks calls [Clean] on the result.
*/
(path: string): string
}
@@ -2542,7 +2607,7 @@ namespace filepath {
* If the path is not absolute it will be joined with the current
* working directory to turn it into an absolute path. The absolute
* path name for a given file is not guaranteed to be unique.
- * Abs calls Clean on the result.
+ * Abs calls [Clean] on the result.
*/
(path: string): string
}
@@ -2550,17 +2615,17 @@ namespace filepath {
/**
* Rel returns a relative path that is lexically equivalent to targpath when
* joined to basepath with an intervening separator. That is,
- * Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
+ * [Join](basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
* On success, the returned path will always be relative to basepath,
* even if basepath and targpath share no elements.
* An error is returned if targpath can't be made relative to basepath or if
* knowing the current working directory would be necessary to compute it.
- * Rel calls Clean on the result.
+ * Rel calls [Clean] on the result.
*/
- (basepath: string): string
+ (basepath: string, targpath: string): string
}
/**
- * WalkFunc is the type of the function called by Walk to visit each
+ * WalkFunc is the type of the function called by [Walk] to visit each
* file or directory.
*
* The path argument contains the argument to Walk as a prefix.
@@ -2576,9 +2641,9 @@ namespace filepath {
* The info argument is the fs.FileInfo for the named path.
*
* The error result returned by the function controls how Walk continues.
- * If the function returns the special value SkipDir, Walk skips the
+ * If the function returns the special value [SkipDir], Walk skips the
* current directory (path if info.IsDir() is true, otherwise path's
- * parent directory). If the function returns the special value SkipAll,
+ * parent directory). If the function returns the special value [SkipAll],
* Walk skips all remaining files and directories. Otherwise, if the function
* returns a non-nil error, Walk stops entirely and returns that error.
*
@@ -2589,14 +2654,14 @@ namespace filepath {
*
* Walk calls the function with a non-nil err argument in two cases.
*
- * First, if an os.Lstat on the root directory or any directory or file
+ * First, if an [os.Lstat] on the root directory or any directory or file
* in the tree fails, Walk calls the function with path set to that
* directory or file's path, info set to nil, and err set to the error
* from os.Lstat.
*
* Second, if a directory's Readdirnames method fails, Walk calls the
* function with path set to the directory's path, info, set to an
- * fs.FileInfo describing the directory, and err set to the error from
+ * [fs.FileInfo] describing the directory, and err set to the error from
* Readdirnames.
*/
interface WalkFunc {(path: string, info: fs.FileInfo, err: Error): void }
@@ -2606,7 +2671,7 @@ namespace filepath {
* directory in the tree, including root.
*
* All errors that arise visiting files and directories are filtered by fn:
- * see the fs.WalkDirFunc documentation for details.
+ * see the [fs.WalkDirFunc] documentation for details.
*
* The files are walked in lexical order, which makes the output deterministic
* but requires WalkDir to read an entire directory into memory before proceeding
@@ -2620,30 +2685,13 @@ namespace filepath {
*/
(root: string, fn: fs.WalkDirFunc): void
}
- interface statDirEntry {
- }
- interface statDirEntry {
- name(): string
- }
- interface statDirEntry {
- isDir(): boolean
- }
- interface statDirEntry {
- type(): fs.FileMode
- }
- interface statDirEntry {
- info(): fs.FileInfo
- }
- interface statDirEntry {
- string(): string
- }
interface walk {
/**
* Walk walks the file tree rooted at root, calling fn for each file or
* directory in the tree, including root.
*
* All errors that arise visiting files and directories are filtered by fn:
- * see the WalkFunc documentation for details.
+ * see the [WalkFunc] documentation for details.
*
* The files are walked in lexical order, which makes the output deterministic
* but requires Walk to read an entire directory into memory before proceeding
@@ -2651,7 +2699,7 @@ namespace filepath {
*
* Walk does not follow symbolic links.
*
- * Walk is less efficient than WalkDir, introduced in Go 1.16,
+ * Walk is less efficient than [WalkDir], introduced in Go 1.16,
* which avoids calling os.Lstat on every visited file or directory.
*/
(root: string, fn: WalkFunc): void
@@ -2668,7 +2716,7 @@ namespace filepath {
interface dir {
/**
* Dir returns all but the last element of path, typically the path's directory.
- * After dropping the final element, Dir calls Clean on the path and trailing
+ * After dropping the final element, Dir calls [Clean] on the path and trailing
* slashes are removed.
* If the path is empty, Dir returns ".".
* If the path consists entirely of separators, Dir returns a single separator.
@@ -2698,7 +2746,133 @@ namespace filepath {
* Deprecated: HasPrefix does not respect path boundaries and
* does not ignore case when required.
*/
- (p: string): boolean
+ (p: string, prefix: string): boolean
+ }
+}
+
+/**
+ * Package exec runs external commands. It wraps os.StartProcess to make it
+ * easier to remap stdin and stdout, connect I/O with pipes, and do other
+ * adjustments.
+ *
+ * Unlike the "system" library call from C and other languages, the
+ * os/exec package intentionally does not invoke the system shell and
+ * does not expand any glob patterns or handle other expansions,
+ * pipelines, or redirections typically done by shells. The package
+ * behaves more like C's "exec" family of functions. To expand glob
+ * patterns, either call the shell directly, taking care to escape any
+ * dangerous input, or use the path/filepath package's Glob function.
+ * To expand environment variables, use package os's ExpandEnv.
+ *
+ * Note that the examples in this package assume a Unix system.
+ * They may not run on Windows, and they do not run in the Go Playground
+ * used by golang.org and godoc.org.
+ *
+ * # Executables in the current directory
+ *
+ * The functions Command and LookPath look for a program
+ * in the directories listed in the current path, following the
+ * conventions of the host operating system.
+ * Operating systems have for decades included the current
+ * directory in this search, sometimes implicitly and sometimes
+ * configured explicitly that way by default.
+ * Modern practice is that including the current directory
+ * is usually unexpected and often leads to security problems.
+ *
+ * To avoid those security problems, as of Go 1.19, this package will not resolve a program
+ * using an implicit or explicit path entry relative to the current directory.
+ * That is, if you run exec.LookPath("go"), it will not successfully return
+ * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured.
+ * Instead, if the usual path algorithms would result in that answer,
+ * these functions return an error err satisfying errors.Is(err, ErrDot).
+ *
+ * For example, consider these two program snippets:
+ *
+ * ```
+ * path, err := exec.LookPath("prog")
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * use(path)
+ * ```
+ *
+ * and
+ *
+ * ```
+ * cmd := exec.Command("prog")
+ * if err := cmd.Run(); err != nil {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * These will not find and run ./prog or .\prog.exe,
+ * no matter how the current path is configured.
+ *
+ * Code that always wants to run a program from the current directory
+ * can be rewritten to say "./prog" instead of "prog".
+ *
+ * Code that insists on including results from relative path entries
+ * can instead override the error using an errors.Is check:
+ *
+ * ```
+ * path, err := exec.LookPath("prog")
+ * if errors.Is(err, exec.ErrDot) {
+ * err = nil
+ * }
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * use(path)
+ * ```
+ *
+ * and
+ *
+ * ```
+ * cmd := exec.Command("prog")
+ * if errors.Is(cmd.Err, exec.ErrDot) {
+ * cmd.Err = nil
+ * }
+ * if err := cmd.Run(); err != nil {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * Setting the environment variable GODEBUG=execerrdot=0
+ * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19
+ * behavior for programs that are unable to apply more targeted fixes.
+ * A future version of Go may remove support for this variable.
+ *
+ * Before adding such overrides, make sure you understand the
+ * security implications of doing so.
+ * See https://go.dev/blog/path-security for more information.
+ */
+namespace exec {
+ interface command {
+ /**
+ * Command returns the Cmd struct to execute the named program with
+ * the given arguments.
+ *
+ * It sets only the Path and Args in the returned structure.
+ *
+ * If name contains no path separators, Command uses LookPath to
+ * resolve name to a complete path if possible. Otherwise it uses name
+ * directly as Path.
+ *
+ * The returned Cmd's Args field is constructed from the command name
+ * followed by the elements of arg, so arg should not include the
+ * command name itself. For example, Command("echo", "hello").
+ * Args[0] is always name, not the possibly resolved Path.
+ *
+ * On Windows, processes receive the whole command line as a single string
+ * and do their own parsing. Command combines and quotes Args into a command
+ * line string with an algorithm compatible with applications using
+ * CommandLineToArgvW (which is the most common way). Notable exceptions are
+ * msiexec.exe and cmd.exe (and thus, all batch files), which have a different
+ * unquoting algorithm. In these or other similar cases, you can do the
+ * quoting yourself and provide the full command line in SysProcAttr.CmdLine,
+ * leaving Args empty.
+ */
+ (name: string, ...arg: string[]): (Cmd)
}
}
@@ -2752,13 +2926,17 @@ namespace security {
import crand = rand
interface encrypt {
/**
- * Encrypt encrypts data with key (must be valid 32 char aes key).
+ * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key).
+ *
+ * This method uses AES-256-GCM block cypher mode.
*/
(data: string|Array, key: string): string
}
interface decrypt {
/**
- * Decrypt decrypts encrypted text with key (must be valid 32 chars aes key).
+ * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key).
+ *
+ * This method uses AES-256-GCM block cypher mode.
*/
(cipherText: string, key: string): string|Array
}
@@ -2834,622 +3012,583 @@ namespace security {
}
}
-/**
- * Package validation provides configurable and extensible rules for validating data of various types.
- */
-namespace ozzo_validation {
+namespace filesystem {
/**
- * Error interface represents an validation error
+ * FileReader defines an interface for a file resource reader.
*/
- interface Error {
+ interface FileReader {
[key:string]: any;
- error(): string
- code(): string
- message(): string
- setMessage(_arg0: string): Error
- params(): _TygojaDict
- setParams(_arg0: _TygojaDict): Error
+ open(): io.ReadSeekCloser
}
-}
-
-/**
- * Package dbx provides a set of DB-agnostic and easy-to-use query building methods for relational databases.
- */
-namespace dbx {
/**
- * Builder supports building SQL statements in a DB-agnostic way.
- * Builder mainly provides two sets of query building methods: those building SELECT statements
- * and those manipulating DB data or schema (e.g. INSERT statements, CREATE TABLE statements).
+ * File defines a single file [io.ReadSeekCloser] resource.
+ *
+ * The file could be from a local path, multipart/form-data header, etc.
*/
- interface Builder {
- [key:string]: any;
- /**
- * NewQuery creates a new Query object with the given SQL statement.
- * The SQL statement may contain parameter placeholders which can be bound with actual parameter
- * values before the statement is executed.
- */
- newQuery(_arg0: string): (Query)
+ interface File {
+ reader: FileReader
+ name: string
+ originalName: string
+ size: number
+ }
+ interface newFileFromPath {
/**
- * Select returns a new SelectQuery object that can be used to build a SELECT statement.
- * The parameters to this method should be the list column names to be selected.
- * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ * NewFileFromPath creates a new File instance from the provided local file path.
*/
- select(..._arg0: string[]): (SelectQuery)
+ (path: string): (File)
+ }
+ interface newFileFromBytes {
/**
- * ModelQuery returns a new ModelQuery object that can be used to perform model insertion, update, and deletion.
- * The parameter to this method should be a pointer to the model struct that needs to be inserted, updated, or deleted.
+ * NewFileFromBytes creates a new File instance from the provided byte slice.
*/
- model(_arg0: {
- }): (ModelQuery)
+ (b: string|Array, name: string): (File)
+ }
+ interface newFileFromMultipart {
/**
- * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ * NewFileFromMultipart creates a new File from the provided multipart header.
*/
- generatePlaceholder(_arg0: number): string
+ (mh: multipart.FileHeader): (File)
+ }
+ interface newFileFromUrl {
/**
- * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
- */
- quote(_arg0: string): string
- /**
- * QuoteSimpleTableName quotes a simple table name.
- * A simple table name does not contain any schema prefix.
- */
- quoteSimpleTableName(_arg0: string): string
- /**
- * QuoteSimpleColumnName quotes a simple column name.
- * A simple column name does not contain any table prefix.
- */
- quoteSimpleColumnName(_arg0: string): string
- /**
- * QueryBuilder returns the query builder supporting the current DB.
- */
- queryBuilder(): QueryBuilder
- /**
- * Insert creates a Query that represents an INSERT SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding column
- * values to be inserted.
- */
- insert(table: string, cols: Params): (Query)
- /**
- * Upsert creates a Query that represents an UPSERT SQL statement.
- * Upsert inserts a row into the table if the primary key or unique index is not found.
- * Otherwise it will update the row with the new values.
- * The keys of cols are the column names, while the values of cols are the corresponding column
- * values to be inserted.
- */
- upsert(table: string, cols: Params, ...constraints: string[]): (Query)
- /**
- * Update creates a Query that represents an UPDATE SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding new column
- * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
- * (be careful in this case as the SQL statement will update ALL rows in the table).
- */
- update(table: string, cols: Params, where: Expression): (Query)
- /**
- * Delete creates a Query that represents a DELETE SQL statement.
- * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
- * (be careful in this case as the SQL statement will delete ALL rows in the table).
- */
- delete(table: string, where: Expression): (Query)
- /**
- * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding column types.
- * The optional "options" parameters will be appended to the generated SQL statement.
- */
- createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
- /**
- * RenameTable creates a Query that can be used to rename a table.
- */
- renameTable(oldName: string): (Query)
- /**
- * DropTable creates a Query that can be used to drop a table.
- */
- dropTable(table: string): (Query)
- /**
- * TruncateTable creates a Query that can be used to truncate a table.
- */
- truncateTable(table: string): (Query)
- /**
- * AddColumn creates a Query that can be used to add a column to a table.
- */
- addColumn(table: string): (Query)
- /**
- * DropColumn creates a Query that can be used to drop a column from a table.
- */
- dropColumn(table: string): (Query)
- /**
- * RenameColumn creates a Query that can be used to rename a column in a table.
- */
- renameColumn(table: string): (Query)
- /**
- * AlterColumn creates a Query that can be used to change the definition of a table column.
- */
- alterColumn(table: string): (Query)
- /**
- * AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
- * The "name" parameter specifies the name of the primary key constraint.
- */
- addPrimaryKey(table: string, ...cols: string[]): (Query)
- /**
- * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
- */
- dropPrimaryKey(table: string): (Query)
- /**
- * AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
- * The length of cols and refCols must be the same as they refer to the primary and referential columns.
- * The optional "options" parameters will be appended to the SQL statement. They can be used to
- * specify options such as "ON DELETE CASCADE".
- */
- addForeignKey(table: string, cols: Array, refTable: string, ...options: string[]): (Query)
- /**
- * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
- */
- dropForeignKey(table: string): (Query)
- /**
- * CreateIndex creates a Query that can be used to create an index for a table.
- */
- createIndex(table: string, ...cols: string[]): (Query)
- /**
- * CreateUniqueIndex creates a Query that can be used to create a unique index for a table.
- */
- createUniqueIndex(table: string, ...cols: string[]): (Query)
- /**
- * DropIndex creates a Query that can be used to remove the named index from a table.
+ * NewFileFromUrl creates a new File from the provided url by
+ * downloading the resource and load it as BytesReader.
+ *
+ * Example
+ *
+ * ```
+ * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ * defer cancel()
+ *
+ * file, err := filesystem.NewFileFromUrl(ctx, "https://example.com/image.png")
+ * ```
*/
- dropIndex(table: string): (Query)
+ (ctx: context.Context, url: string): (File)
}
/**
- * BaseBuilder provides a basic implementation of the Builder interface.
+ * MultipartReader defines a FileReader from [multipart.FileHeader].
*/
- interface BaseBuilder {
+ interface MultipartReader {
+ header?: multipart.FileHeader
}
- interface newBaseBuilder {
+ interface MultipartReader {
/**
- * NewBaseBuilder creates a new BaseBuilder instance.
+ * Open implements the [filesystem.FileReader] interface.
*/
- (db: DB, executor: Executor): (BaseBuilder)
+ open(): io.ReadSeekCloser
}
- interface BaseBuilder {
+ /**
+ * PathReader defines a FileReader from a local file path.
+ */
+ interface PathReader {
+ path: string
+ }
+ interface PathReader {
/**
- * DB returns the DB instance that this builder is associated with.
+ * Open implements the [filesystem.FileReader] interface.
*/
- db(): (DB)
+ open(): io.ReadSeekCloser
}
- interface BaseBuilder {
+ /**
+ * BytesReader defines a FileReader from bytes content.
+ */
+ interface BytesReader {
+ bytes: string|Array
+ }
+ interface BytesReader {
/**
- * Executor returns the executor object (a DB instance or a transaction) for executing SQL statements.
+ * Open implements the [filesystem.FileReader] interface.
*/
- executor(): Executor
+ open(): io.ReadSeekCloser
}
- interface BaseBuilder {
+ type _subRtcDW = bytes.Reader
+ interface bytesReadSeekCloser extends _subRtcDW {
+ }
+ interface bytesReadSeekCloser {
/**
- * NewQuery creates a new Query object with the given SQL statement.
- * The SQL statement may contain parameter placeholders which can be bound with actual parameter
- * values before the statement is executed.
+ * Close implements the [io.ReadSeekCloser] interface.
*/
- newQuery(sql: string): (Query)
+ close(): void
}
- interface BaseBuilder {
+ interface System {
+ }
+ interface newS3 {
/**
- * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ * NewS3 initializes an S3 filesystem instance.
+ *
+ * NB! Make sure to call `Close()` after you are done working with it.
*/
- generatePlaceholder(_arg0: number): string
+ (bucketName: string, region: string, endpoint: string, accessKey: string, secretKey: string, s3ForcePathStyle: boolean): (System)
}
- interface BaseBuilder {
+ interface newLocal {
/**
- * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
+ * NewLocal initializes a new local filesystem instance.
+ *
+ * NB! Make sure to call `Close()` after you are done working with it.
*/
- quote(s: string): string
+ (dirPath: string): (System)
}
- interface BaseBuilder {
+ interface System {
/**
- * QuoteSimpleTableName quotes a simple table name.
- * A simple table name does not contain any schema prefix.
+ * SetContext assigns the specified context to the current filesystem.
*/
- quoteSimpleTableName(s: string): string
+ setContext(ctx: context.Context): void
}
- interface BaseBuilder {
+ interface System {
/**
- * QuoteSimpleColumnName quotes a simple column name.
- * A simple column name does not contain any table prefix.
+ * Close releases any resources used for the related filesystem.
*/
- quoteSimpleColumnName(s: string): string
+ close(): void
}
- interface BaseBuilder {
+ interface System {
/**
- * Insert creates a Query that represents an INSERT SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding column
- * values to be inserted.
+ * Exists checks if file with fileKey path exists or not.
*/
- insert(table: string, cols: Params): (Query)
+ exists(fileKey: string): boolean
}
- interface BaseBuilder {
+ interface System {
/**
- * Upsert creates a Query that represents an UPSERT SQL statement.
- * Upsert inserts a row into the table if the primary key or unique index is not found.
- * Otherwise it will update the row with the new values.
- * The keys of cols are the column names, while the values of cols are the corresponding column
- * values to be inserted.
+ * Attributes returns the attributes for the file with fileKey path.
*/
- upsert(table: string, cols: Params, ...constraints: string[]): (Query)
+ attributes(fileKey: string): (blob.Attributes)
}
- interface BaseBuilder {
+ interface System {
/**
- * Update creates a Query that represents an UPDATE SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding new column
- * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
- * (be careful in this case as the SQL statement will update ALL rows in the table).
+ * GetFile returns a file content reader for the given fileKey.
+ *
+ * NB! Make sure to call `Close()` after you are done working with it.
*/
- update(table: string, cols: Params, where: Expression): (Query)
+ getFile(fileKey: string): (blob.Reader)
}
- interface BaseBuilder {
+ interface System {
/**
- * Delete creates a Query that represents a DELETE SQL statement.
- * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
- * (be careful in this case as the SQL statement will delete ALL rows in the table).
+ * Copy copies the file stored at srcKey to dstKey.
+ *
+ * If dstKey file already exists, it is overwritten.
*/
- delete(table: string, where: Expression): (Query)
+ copy(srcKey: string, dstKey: string): void
}
- interface BaseBuilder {
+ interface System {
/**
- * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
- * The keys of cols are the column names, while the values of cols are the corresponding column types.
- * The optional "options" parameters will be appended to the generated SQL statement.
+ * List returns a flat list with info for all files under the specified prefix.
*/
- createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
+ list(prefix: string): Array<(blob.ListObject | undefined)>
}
- interface BaseBuilder {
+ interface System {
/**
- * RenameTable creates a Query that can be used to rename a table.
+ * Upload writes content into the fileKey location.
*/
- renameTable(oldName: string): (Query)
+ upload(content: string|Array, fileKey: string): void
}
- interface BaseBuilder {
+ interface System {
/**
- * DropTable creates a Query that can be used to drop a table.
+ * UploadFile uploads the provided multipart file to the fileKey location.
*/
- dropTable(table: string): (Query)
+ uploadFile(file: File, fileKey: string): void
}
- interface BaseBuilder {
+ interface System {
/**
- * TruncateTable creates a Query that can be used to truncate a table.
+ * UploadMultipart uploads the provided multipart file to the fileKey location.
*/
- truncateTable(table: string): (Query)
+ uploadMultipart(fh: multipart.FileHeader, fileKey: string): void
}
- interface BaseBuilder {
+ interface System {
/**
- * AddColumn creates a Query that can be used to add a column to a table.
+ * Delete deletes stored file at fileKey location.
*/
- addColumn(table: string): (Query)
+ delete(fileKey: string): void
}
- interface BaseBuilder {
+ interface System {
/**
- * DropColumn creates a Query that can be used to drop a column from a table.
+ * DeletePrefix deletes everything starting with the specified prefix.
*/
- dropColumn(table: string): (Query)
+ deletePrefix(prefix: string): Array
}
- interface BaseBuilder {
+ interface System {
/**
- * RenameColumn creates a Query that can be used to rename a column in a table.
+ * Serve serves the file at fileKey location to an HTTP response.
+ *
+ * If the `download` query parameter is used the file will be always served for
+ * download no matter of its type (aka. with "Content-Disposition: attachment").
*/
- renameColumn(table: string): (Query)
+ serve(res: http.ResponseWriter, req: http.Request, fileKey: string, name: string): void
}
- interface BaseBuilder {
+ interface System {
/**
- * AlterColumn creates a Query that can be used to change the definition of a table column.
+ * CreateThumb creates a new thumb image for the file at originalKey location.
+ * The new thumb file is stored at thumbKey location.
+ *
+ * thumbSize is in the format:
+ * - 0xH (eg. 0x100) - resize to H height preserving the aspect ratio
+ * - Wx0 (eg. 300x0) - resize to W width preserving the aspect ratio
+ * - WxH (eg. 300x100) - resize and crop to WxH viewbox (from center)
+ * - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top)
+ * - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom)
+ * - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping)
*/
- alterColumn(table: string): (Query)
+ createThumb(originalKey: string, thumbKey: string, thumbSize: string): void
}
- interface BaseBuilder {
- /**
- * AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
- * The "name" parameter specifies the name of the primary key constraint.
- */
- addPrimaryKey(table: string, ...cols: string[]): (Query)
+ // @ts-ignore
+ import v4 = signer
+ // @ts-ignore
+ import smithyhttp = http
+ interface ignoredHeadersKey {
}
- interface BaseBuilder {
+}
+
+/**
+ * Package template is a thin wrapper around the standard html/template
+ * and text/template packages that implements a convenient registry to
+ * load and cache templates on the fly concurrently.
+ *
+ * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code.
+ *
+ * Example:
+ *
+ * ```
+ * registry := template.NewRegistry()
+ *
+ * html1, err := registry.LoadFiles(
+ * // the files set wil be parsed only once and then cached
+ * "layout.html",
+ * "content.html",
+ * ).Render(map[string]any{"name": "John"})
+ *
+ * html2, err := registry.LoadFiles(
+ * // reuse the already parsed and cached files set
+ * "layout.html",
+ * "content.html",
+ * ).Render(map[string]any{"name": "Jane"})
+ * ```
+ */
+namespace template {
+ interface newRegistry {
/**
- * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
+ * NewRegistry creates and initializes a new templates registry with
+ * some defaults (eg. global "raw" template function for unescaped HTML).
+ *
+ * Use the Registry.Load* methods to load templates into the registry.
*/
- dropPrimaryKey(table: string): (Query)
+ (): (Registry)
}
- interface BaseBuilder {
- /**
- * AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
- * The length of cols and refCols must be the same as they refer to the primary and referential columns.
- * The optional "options" parameters will be appended to the SQL statement. They can be used to
- * specify options such as "ON DELETE CASCADE".
- */
- addForeignKey(table: string, cols: Array, refTable: string, ...options: string[]): (Query)
+ /**
+ * Registry defines a templates registry that is safe to be used by multiple goroutines.
+ *
+ * Use the Registry.Load* methods to load templates into the registry.
+ */
+ interface Registry {
}
- interface BaseBuilder {
+ interface Registry {
/**
- * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
+ * AddFuncs registers new global template functions.
+ *
+ * The key of each map entry is the function name that will be used in the templates.
+ * If a function with the map entry name already exists it will be replaced with the new one.
+ *
+ * The value of each map entry is a function that must have either a
+ * single return value, or two return values of which the second has type error.
+ *
+ * Example:
+ *
+ * r.AddFuncs(map[string]any{
+ * ```
+ * "toUpper": func(str string) string {
+ * return strings.ToUppser(str)
+ * },
+ * ...
+ * ```
+ * })
*/
- dropForeignKey(table: string): (Query)
+ addFuncs(funcs: _TygojaDict): (Registry)
}
- interface BaseBuilder {
+ interface Registry {
/**
- * CreateIndex creates a Query that can be used to create an index for a table.
+ * LoadFiles caches (if not already) the specified filenames set as a
+ * single template and returns a ready to use Renderer instance.
+ *
+ * There must be at least 1 filename specified.
*/
- createIndex(table: string, ...cols: string[]): (Query)
+ loadFiles(...filenames: string[]): (Renderer)
}
- interface BaseBuilder {
+ interface Registry {
/**
- * CreateUniqueIndex creates a Query that can be used to create a unique index for a table.
+ * LoadString caches (if not already) the specified inline string as a
+ * single template and returns a ready to use Renderer instance.
*/
- createUniqueIndex(table: string, ...cols: string[]): (Query)
+ loadString(text: string): (Renderer)
}
- interface BaseBuilder {
+ interface Registry {
/**
- * DropIndex creates a Query that can be used to remove the named index from a table.
+ * LoadFS caches (if not already) the specified fs and globPatterns
+ * pair as a single template and returns a ready to use Renderer instance.
+ *
+ * There must be at least 1 file matching the provided globPattern(s)
+ * (note that most file names serve as glob patterns matching themselves).
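+ *
+ * Illustrative sketch (assumes an embedded "views" directory; names are placeholders):
+ *
+ * ```
+ * //go:embed views
+ * var viewsFS embed.FS
+ *
+ * html, err := registry.LoadFS(viewsFS, "views/layout.html", "views/content.html").Render(nil)
+ * ```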
*/
- dropIndex(table: string): (Query)
+ loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer)
}
/**
- * MssqlBuilder is the builder for SQL Server databases.
+ * Renderer defines a single parsed template.
*/
- type _subeOzgU = BaseBuilder
- interface MssqlBuilder extends _subeOzgU {
+ interface Renderer {
+ }
+ interface Renderer {
+ /**
+ * Render executes the template with the specified data as the dot object
+ * and returns the result as a plain string.
+ */
+ render(data: any): string
}
+}
+
+/**
+ * Package validation provides configurable and extensible rules for validating data of various types.
+ */
+namespace ozzo_validation {
/**
- * MssqlQueryBuilder is the query builder for SQL Server databases.
+ * Error interface represents a validation error.
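+ *
+ * Illustrative sketch (Go side, with the ozzo-validation package imported as validation; assumes err was produced by a validation rule):
+ *
+ * ```
+ * if verr, ok := err.(validation.Error); ok {
+ *     log.Println(verr.Code(), verr.Message())
+ * }
+ * ```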
*/
- type _subPYpyy = BaseQueryBuilder
- interface MssqlQueryBuilder extends _subPYpyy {
+ interface Error {
+ [key:string]: any;
+ error(): string
+ code(): string
+ message(): string
+ setMessage(_arg0: string): Error
+ params(): _TygojaDict
+ setParams(_arg0: _TygojaDict): Error
}
- interface newMssqlBuilder {
+}
+
+namespace middleware {
+ interface bodyLimit {
/**
- * NewMssqlBuilder creates a new MssqlBuilder instance.
+ * BodyLimit returns a BodyLimit middleware.
+ *
+ * BodyLimit middleware sets the maximum allowed size for a request body. If the size exceeds the configured limit, it
+ * sends a "413 - Request Entity Too Large" response. The limit is determined from both the `Content-Length` request
+ * header and the actual content read, so it cannot be bypassed with a misleading `Content-Length` header.
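+ *
+ * Illustrative sketch (assumes an echo instance named e):
+ *
+ * ```
+ * e.Use(middleware.BodyLimit(2 << 20)) // reject request bodies larger than ~2MB
+ * ```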
*/
- (db: DB, executor: Executor): Builder
+ (limitBytes: number): echo.MiddlewareFunc
}
- interface MssqlBuilder {
+ interface gzip {
/**
- * QueryBuilder returns the query builder supporting the current DB.
+ * Gzip returns a middleware which compresses the HTTP response using the gzip compression scheme.
*/
- queryBuilder(): QueryBuilder
+ (): echo.MiddlewareFunc
}
- interface MssqlBuilder {
+}
+
+/**
+ * Package dbx provides a set of DB-agnostic and easy-to-use query building methods for relational databases.
+ */
+namespace dbx {
+ /**
+ * Builder supports building SQL statements in a DB-agnostic way.
+ * Builder mainly provides two sets of query building methods: those building SELECT statements
+ * and those manipulating DB data or schema (e.g. INSERT statements, CREATE TABLE statements).
+ */
+ interface Builder {
+ [key:string]: any;
+ /**
+ * NewQuery creates a new Query object with the given SQL statement.
+ * The SQL statement may contain parameter placeholders which can be bound with actual parameter
+ * values before the statement is executed.
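+ *
+ * Illustrative sketch (assumes db is a dbx.Builder; the table and parameter values are placeholders):
+ *
+ * ```
+ * q := db.NewQuery("SELECT id, name FROM users WHERE id={:id}").Bind(dbx.Params{"id": 100})
+ * ```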
+ */
+ newQuery(_arg0: string): (Query)
/**
* Select returns a new SelectQuery object that can be used to build a SELECT statement.
* The parameters to this method should be the list column names to be selected.
* A column name may have an optional alias name. For example, Select("id", "my_name AS name").
*/
- select(...cols: string[]): (SelectQuery)
- }
- interface MssqlBuilder {
+ select(..._arg0: string[]): (SelectQuery)
/**
- * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
- * The model passed to this method should be a pointer to a model struct.
+ * ModelQuery returns a new ModelQuery object that can be used to perform model insertion, update, and deletion.
+ * The parameter to this method should be a pointer to the model struct that needs to be inserted, updated, or deleted.
*/
- model(model: {
- }): (ModelQuery)
- }
- interface MssqlBuilder {
+ model(_arg0: {
+ }): (ModelQuery)
+ /**
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ */
+ generatePlaceholder(_arg0: number): string
+ /**
+ * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
+ */
+ quote(_arg0: string): string
/**
* QuoteSimpleTableName quotes a simple table name.
* A simple table name does not contain any schema prefix.
*/
- quoteSimpleTableName(s: string): string
- }
- interface MssqlBuilder {
+ quoteSimpleTableName(_arg0: string): string
/**
* QuoteSimpleColumnName quotes a simple column name.
* A simple column name does not contain any table prefix.
*/
- quoteSimpleColumnName(s: string): string
- }
- interface MssqlBuilder {
+ quoteSimpleColumnName(_arg0: string): string
/**
- * RenameTable creates a Query that can be used to rename a table.
+ * QueryBuilder returns the query builder supporting the current DB.
*/
- renameTable(oldName: string): (Query)
- }
- interface MssqlBuilder {
+ queryBuilder(): QueryBuilder
/**
- * RenameColumn creates a Query that can be used to rename a column in a table.
+ * Insert creates a Query that represents an INSERT SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
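+ *
+ * Illustrative sketch (assumes db is a dbx.Builder; the table and column names are placeholders):
+ *
+ * ```
+ * _, err := db.Insert("users", dbx.Params{"name": "John", "email": "john@example.com"}).Execute()
+ * ```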
*/
- renameColumn(table: string): (Query)
- }
- interface MssqlBuilder {
+ insert(table: string, cols: Params): (Query)
/**
- * AlterColumn creates a Query that can be used to change the definition of a table column.
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
*/
- alterColumn(table: string): (Query)
- }
- interface MssqlQueryBuilder {
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
/**
- * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ * Update creates a Query that represents an UPDATE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding new column
+ * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will update ALL rows in the table).
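+ *
+ * Illustrative sketch (assumes db is a dbx.Builder; dbx.HashExp is used here to build the WHERE condition):
+ *
+ * ```
+ * _, err := db.Update("users", dbx.Params{"status": "active"}, dbx.HashExp{"id": 100}).Execute()
+ * ```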
*/
- buildOrderByAndLimit(sql: string, cols: Array, limit: number, offset: number): string
- }
- /**
- * MysqlBuilder is the builder for MySQL databases.
- */
- type _subshvgw = BaseBuilder
- interface MysqlBuilder extends _subshvgw {
- }
- interface newMysqlBuilder {
+ update(table: string, cols: Params, where: Expression): (Query)
/**
- * NewMysqlBuilder creates a new MysqlBuilder instance.
+ * Delete creates a Query that represents a DELETE SQL statement.
+ * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will delete ALL rows in the table).
*/
- (db: DB, executor: Executor): Builder
- }
- interface MysqlBuilder {
+ delete(table: string, where: Expression): (Query)
/**
- * QueryBuilder returns the query builder supporting the current DB.
+ * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column types.
+ * The optional "options" parameters will be appended to the generated SQL statement.
*/
- queryBuilder(): QueryBuilder
- }
- interface MysqlBuilder {
+ createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
/**
- * Select returns a new SelectQuery object that can be used to build a SELECT statement.
- * The parameters to this method should be the list column names to be selected.
- * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ * RenameTable creates a Query that can be used to rename a table.
*/
- select(...cols: string[]): (SelectQuery)
- }
- interface MysqlBuilder {
+ renameTable(oldName: string, newName: string): (Query)
/**
- * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
- * The model passed to this method should be a pointer to a model struct.
+ * DropTable creates a Query that can be used to drop a table.
*/
- model(model: {
- }): (ModelQuery)
- }
- interface MysqlBuilder {
+ dropTable(table: string): (Query)
/**
- * QuoteSimpleTableName quotes a simple table name.
- * A simple table name does not contain any schema prefix.
+ * TruncateTable creates a Query that can be used to truncate a table.
*/
- quoteSimpleTableName(s: string): string
- }
- interface MysqlBuilder {
+ truncateTable(table: string): (Query)
/**
- * QuoteSimpleColumnName quotes a simple column name.
- * A simple column name does not contain any table prefix.
+ * AddColumn creates a Query that can be used to add a column to a table.
*/
- quoteSimpleColumnName(s: string): string
- }
- interface MysqlBuilder {
+ addColumn(table: string, col: string, typ: string): (Query)
/**
- * Upsert creates a Query that represents an UPSERT SQL statement.
- * Upsert inserts a row into the table if the primary key or unique index is not found.
- * Otherwise it will update the row with the new values.
- * The keys of cols are the column names, while the values of cols are the corresponding column
- * values to be inserted.
+ * DropColumn creates a Query that can be used to drop a column from a table.
*/
- upsert(table: string, cols: Params, ...constraints: string[]): (Query)
- }
- interface MysqlBuilder {
+ dropColumn(table: string, col: string): (Query)
/**
* RenameColumn creates a Query that can be used to rename a column in a table.
*/
- renameColumn(table: string): (Query)
- }
- interface MysqlBuilder {
+ renameColumn(table: string, oldName: string, newName: string): (Query)
/**
- * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
*/
- dropPrimaryKey(table: string): (Query)
- }
- interface MysqlBuilder {
+ alterColumn(table: string, col: string, typ: string): (Query)
/**
- * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
+ * AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
+ * The "name" parameter specifies the name of the primary key constraint.
*/
- dropForeignKey(table: string): (Query)
- }
- /**
- * OciBuilder is the builder for Oracle databases.
- */
- type _subTukdT = BaseBuilder
- interface OciBuilder extends _subTukdT {
- }
- /**
- * OciQueryBuilder is the query builder for Oracle databases.
- */
- type _suboNUbJ = BaseQueryBuilder
- interface OciQueryBuilder extends _suboNUbJ {
- }
- interface newOciBuilder {
+ addPrimaryKey(table: string, name: string, ...cols: string[]): (Query)
/**
- * NewOciBuilder creates a new OciBuilder instance.
+ * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
*/
- (db: DB, executor: Executor): Builder
- }
- interface OciBuilder {
+ dropPrimaryKey(table: string, name: string): (Query)
/**
- * Select returns a new SelectQuery object that can be used to build a SELECT statement.
- * The parameters to this method should be the list column names to be selected.
- * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ * AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
+ * The length of cols and refCols must be the same as they refer to the primary and referential columns.
+ * The optional "options" parameters will be appended to the SQL statement. They can be used to
+ * specify options such as "ON DELETE CASCADE".
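+ *
+ * Illustrative sketch (table, constraint and column names are placeholders):
+ *
+ * ```
+ * q := db.AddForeignKey("orders", "fk_orders_user", []string{"user_id"}, []string{"id"}, "users", "ON DELETE CASCADE")
+ * _, err := q.Execute()
+ * ```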
*/
- select(...cols: string[]): (SelectQuery)
- }
- interface OciBuilder {
+ addForeignKey(table: string, name: string, cols: Array, refCols: Array, refTable: string, ...options: string[]): (Query)
/**
- * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
- * The model passed to this method should be a pointer to a model struct.
+ * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
*/
- model(model: {
- }): (ModelQuery)
- }
- interface OciBuilder {
+ dropForeignKey(table: string, name: string): (Query)
/**
- * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ * CreateIndex creates a Query that can be used to create an index for a table.
*/
- generatePlaceholder(i: number): string
- }
- interface OciBuilder {
+ createIndex(table: string, name: string, ...cols: string[]): (Query)
/**
- * QueryBuilder returns the query builder supporting the current DB.
+ * CreateUniqueIndex creates a Query that can be used to create a unique index for a table.
*/
- queryBuilder(): QueryBuilder
- }
- interface OciBuilder {
+ createUniqueIndex(table: string, name: string, ...cols: string[]): (Query)
/**
* DropIndex creates a Query that can be used to remove the named index from a table.
*/
- dropIndex(table: string): (Query)
+ dropIndex(table: string, name: string): (Query)
}
- interface OciBuilder {
+ /**
+ * BaseBuilder provides a basic implementation of the Builder interface.
+ */
+ interface BaseBuilder {
+ }
+ interface newBaseBuilder {
/**
- * RenameTable creates a Query that can be used to rename a table.
+ * NewBaseBuilder creates a new BaseBuilder instance.
*/
- renameTable(oldName: string): (Query)
+ (db: DB, executor: Executor): (BaseBuilder)
}
- interface OciBuilder {
+ interface BaseBuilder {
/**
- * AlterColumn creates a Query that can be used to change the definition of a table column.
+ * DB returns the DB instance that this builder is associated with.
*/
- alterColumn(table: string): (Query)
+ db(): (DB)
}
- interface OciQueryBuilder {
+ interface BaseBuilder {
/**
- * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ * Executor returns the executor object (a DB instance or a transaction) for executing SQL statements.
*/
- buildOrderByAndLimit(sql: string, cols: Array, limit: number, offset: number): string
+ executor(): Executor
}
- /**
- * PgsqlBuilder is the builder for PostgreSQL databases.
- */
- type _subKWcqA = BaseBuilder
- interface PgsqlBuilder extends _subKWcqA {
+ interface BaseBuilder {
+ /**
+ * NewQuery creates a new Query object with the given SQL statement.
+ * The SQL statement may contain parameter placeholders which can be bound with actual parameter
+ * values before the statement is executed.
+ */
+ newQuery(sql: string): (Query)
}
- interface newPgsqlBuilder {
+ interface BaseBuilder {
/**
- * NewPgsqlBuilder creates a new PgsqlBuilder instance.
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
*/
- (db: DB, executor: Executor): Builder
+ generatePlaceholder(_arg0: number): string
}
- interface PgsqlBuilder {
+ interface BaseBuilder {
/**
- * Select returns a new SelectQuery object that can be used to build a SELECT statement.
- * The parameters to this method should be the list column names to be selected.
- * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
*/
- select(...cols: string[]): (SelectQuery)
+ quote(s: string): string
}
- interface PgsqlBuilder {
+ interface BaseBuilder {
/**
- * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
- * The model passed to this method should be a pointer to a model struct.
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
*/
- model(model: {
- }): (ModelQuery)
+ quoteSimpleTableName(s: string): string
}
- interface PgsqlBuilder {
+ interface BaseBuilder {
/**
- * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
*/
- generatePlaceholder(i: number): string
+ quoteSimpleColumnName(s: string): string
}
- interface PgsqlBuilder {
+ interface BaseBuilder {
/**
- * QueryBuilder returns the query builder supporting the current DB.
+ * Insert creates a Query that represents an INSERT SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
*/
- queryBuilder(): QueryBuilder
+ insert(table: string, cols: Params): (Query)
}
- interface PgsqlBuilder {
+ interface BaseBuilder {
/**
* Upsert creates a Query that represents an UPSERT SQL statement.
* Upsert inserts a row into the table if the primary key or unique index is not found.
@@ -3459,143 +3598,144 @@ namespace dbx {
*/
upsert(table: string, cols: Params, ...constraints: string[]): (Query)
}
- interface PgsqlBuilder {
- /**
- * DropIndex creates a Query that can be used to remove the named index from a table.
- */
- dropIndex(table: string): (Query)
- }
- interface PgsqlBuilder {
- /**
- * RenameTable creates a Query that can be used to rename a table.
- */
- renameTable(oldName: string): (Query)
- }
- interface PgsqlBuilder {
- /**
- * AlterColumn creates a Query that can be used to change the definition of a table column.
- */
- alterColumn(table: string): (Query)
- }
- /**
- * SqliteBuilder is the builder for SQLite databases.
- */
- type _subsoHkv = BaseBuilder
- interface SqliteBuilder extends _subsoHkv {
- }
- interface newSqliteBuilder {
+ interface BaseBuilder {
/**
- * NewSqliteBuilder creates a new SqliteBuilder instance.
+ * Update creates a Query that represents an UPDATE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding new column
+ * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will update ALL rows in the table).
*/
- (db: DB, executor: Executor): Builder
+ update(table: string, cols: Params, where: Expression): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * QueryBuilder returns the query builder supporting the current DB.
+ * Delete creates a Query that represents a DELETE SQL statement.
+ * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will delete ALL rows in the table).
*/
- queryBuilder(): QueryBuilder
+ delete(table: string, where: Expression): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * Select returns a new SelectQuery object that can be used to build a SELECT statement.
- * The parameters to this method should be the list column names to be selected.
- * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column types.
+ * The optional "options" parameters will be appended to the generated SQL statement.
*/
- select(...cols: string[]): (SelectQuery)
+ createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
- * The model passed to this method should be a pointer to a model struct.
+ * RenameTable creates a Query that can be used to rename a table.
*/
- model(model: {
- }): (ModelQuery)
+ renameTable(oldName: string, newName: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * QuoteSimpleTableName quotes a simple table name.
- * A simple table name does not contain any schema prefix.
+ * DropTable creates a Query that can be used to drop a table.
*/
- quoteSimpleTableName(s: string): string
+ dropTable(table: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * QuoteSimpleColumnName quotes a simple column name.
- * A simple column name does not contain any table prefix.
+ * TruncateTable creates a Query that can be used to truncate a table.
*/
- quoteSimpleColumnName(s: string): string
+ truncateTable(table: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * DropIndex creates a Query that can be used to remove the named index from a table.
+ * AddColumn creates a Query that can be used to add a column to a table.
*/
- dropIndex(table: string): (Query)
+ addColumn(table: string, col: string, typ: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * TruncateTable creates a Query that can be used to truncate a table.
+ * DropColumn creates a Query that can be used to drop a column from a table.
*/
- truncateTable(table: string): (Query)
+ dropColumn(table: string, col: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
- * RenameTable creates a Query that can be used to rename a table.
+ * RenameColumn creates a Query that can be used to rename a column in a table.
*/
- renameTable(oldName: string): (Query)
+ renameColumn(table: string, oldName: string, newName: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
* AlterColumn creates a Query that can be used to change the definition of a table column.
*/
- alterColumn(table: string): (Query)
+ alterColumn(table: string, col: string, typ: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
* AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
* The "name" parameter specifies the name of the primary key constraint.
*/
- addPrimaryKey(table: string, ...cols: string[]): (Query)
+ addPrimaryKey(table: string, name: string, ...cols: string[]): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
* DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
*/
- dropPrimaryKey(table: string): (Query)
+ dropPrimaryKey(table: string, name: string): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
* AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
* The length of cols and refCols must be the same as they refer to the primary and referential columns.
* The optional "options" parameters will be appended to the SQL statement. They can be used to
* specify options such as "ON DELETE CASCADE".
*/
- addForeignKey(table: string, cols: Array, refTable: string, ...options: string[]): (Query)
+ addForeignKey(table: string, name: string, cols: Array, refCols: Array, refTable: string, ...options: string[]): (Query)
}
- interface SqliteBuilder {
+ interface BaseBuilder {
/**
* DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
*/
- dropForeignKey(table: string): (Query)
- }
- /**
- * StandardBuilder is the builder that is used by DB for an unknown driver.
- */
- type _subZtxuI = BaseBuilder
- interface StandardBuilder extends _subZtxuI {
+ dropForeignKey(table: string, name: string): (Query)
}
- interface newStandardBuilder {
+ interface BaseBuilder {
/**
- * NewStandardBuilder creates a new StandardBuilder instance.
+ * CreateIndex creates a Query that can be used to create an index for a table.
+ */
+ createIndex(table: string, name: string, ...cols: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * CreateUniqueIndex creates a Query that can be used to create a unique index for a table.
+ */
+ createUniqueIndex(table: string, name: string, ...cols: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * DropIndex creates a Query that can be used to remove the named index from a table.
+ */
+ dropIndex(table: string, name: string): (Query)
+ }
+ /**
+ * MssqlBuilder is the builder for SQL Server databases.
+ */
+ type _subrFKDD = BaseBuilder
+ interface MssqlBuilder extends _subrFKDD {
+ }
+ /**
+ * MssqlQueryBuilder is the query builder for SQL Server databases.
+ */
+ type _submHtvV = BaseQueryBuilder
+ interface MssqlQueryBuilder extends _submHtvV {
+ }
+ interface newMssqlBuilder {
+ /**
+ * NewMssqlBuilder creates a new MssqlBuilder instance.
*/
(db: DB, executor: Executor): Builder
}
- interface StandardBuilder {
+ interface MssqlBuilder {
/**
* QueryBuilder returns the query builder supporting the current DB.
*/
queryBuilder(): QueryBuilder
}
- interface StandardBuilder {
+ interface MssqlBuilder {
/**
* Select returns a new SelectQuery object that can be used to build a SELECT statement.
* The parameters to this method should be the list column names to be selected.
@@ -3603,7 +3743,7 @@ namespace dbx {
*/
select(...cols: string[]): (SelectQuery)
}
- interface StandardBuilder {
+ interface MssqlBuilder {
/**
* Model returns a new ModelQuery object that can be used to perform model-based DB operations.
* The model passed to this method should be a pointer to a model struct.
@@ -3611,572 +3751,444 @@ namespace dbx {
model(model: {
}): (ModelQuery)
}
- /**
- * LogFunc logs a message for each SQL statement being executed.
- * This method takes one or multiple parameters. If a single parameter
- * is provided, it will be treated as the log message. If multiple parameters
- * are provided, they will be passed to fmt.Sprintf() to generate the log message.
- */
- interface LogFunc {(format: string, ...a: {
- }[]): void }
- /**
- * PerfFunc is called when a query finishes execution.
- * The query execution time is passed to this function so that the DB performance
- * can be profiled. The "ns" parameter gives the number of nanoseconds that the
- * SQL statement takes to execute, while the "execute" parameter indicates whether
- * the SQL statement is executed or queried (usually SELECT statements).
- */
- interface PerfFunc {(ns: number, sql: string, execute: boolean): void }
- /**
- * QueryLogFunc is called each time when performing a SQL query.
- * The "t" parameter gives the time that the SQL statement takes to execute,
- * while rows and err are the result of the query.
- */
- interface QueryLogFunc {(ctx: context.Context, t: time.Duration, sql: string, rows: sql.Rows, err: Error): void }
- /**
- * ExecLogFunc is called each time when a SQL statement is executed.
- * The "t" parameter gives the time that the SQL statement takes to execute,
- * while result and err refer to the result of the execution.
- */
- interface ExecLogFunc {(ctx: context.Context, t: time.Duration, sql: string, result: sql.Result, err: Error): void }
- /**
- * BuilderFunc creates a Builder instance using the given DB instance and Executor.
- */
- interface BuilderFunc {(_arg0: DB, _arg1: Executor): Builder }
- /**
- * DB enhances sql.DB by providing a set of DB-agnostic query building methods.
- * DB allows easier query building and population of data into Go variables.
- */
- type _subwSDJJ = Builder
- interface DB extends _subwSDJJ {
+ interface MssqlBuilder {
/**
- * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc.
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
*/
- fieldMapper: FieldMapFunc
+ quoteSimpleTableName(s: string): string
+ }
+ interface MssqlBuilder {
/**
- * TableMapper maps structs to table names. Defaults to GetTableName.
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
*/
- tableMapper: TableMapFunc
+ quoteSimpleColumnName(s: string): string
+ }
+ interface MssqlBuilder {
/**
- * LogFunc logs the SQL statements being executed. Defaults to nil, meaning no logging.
+ * RenameTable creates a Query that can be used to rename a table.
*/
- logFunc: LogFunc
+ renameTable(oldName: string, newName: string): (Query)
+ }
+ interface MssqlBuilder {
/**
- * PerfFunc logs the SQL execution time. Defaults to nil, meaning no performance profiling.
- * Deprecated: Please use QueryLogFunc and ExecLogFunc instead.
+ * RenameColumn creates a Query that can be used to rename a column in a table.
*/
- perfFunc: PerfFunc
+ renameColumn(table: string, oldName: string, newName: string): (Query)
+ }
+ interface MssqlBuilder {
/**
- * QueryLogFunc is called each time when performing a SQL query that returns data.
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
*/
- queryLogFunc: QueryLogFunc
+ alterColumn(table: string, col: string, typ: string): (Query)
+ }
+ interface MssqlQueryBuilder {
/**
- * ExecLogFunc is called each time when a SQL statement is executed.
+ * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
*/
- execLogFunc: ExecLogFunc
+ buildOrderByAndLimit(sql: string, cols: Array, limit: number, offset: number): string
}
/**
- * Errors represents a list of errors.
+ * MysqlBuilder is the builder for MySQL databases.
*/
- interface Errors extends Array{}
- interface newFromDB {
+ type _subIVLoN = BaseBuilder
+ interface MysqlBuilder extends _subIVLoN {
+ }
+ interface newMysqlBuilder {
/**
- * NewFromDB encapsulates an existing database connection.
+ * NewMysqlBuilder creates a new MysqlBuilder instance.
*/
- (sqlDB: sql.DB, driverName: string): (DB)
+ (db: DB, executor: Executor): Builder
}
- interface open {
+ interface MysqlBuilder {
/**
- * Open opens a database specified by a driver name and data source name (DSN).
- * Note that Open does not check if DSN is specified correctly. It doesn't try to establish a DB connection either.
- * Please refer to sql.Open() for more information.
+ * QueryBuilder returns the query builder supporting the current DB.
*/
- (driverName: string): (DB)
+ queryBuilder(): QueryBuilder
}
- interface mustOpen {
+ interface MysqlBuilder {
/**
- * MustOpen opens a database and establishes a connection to it.
- * Please refer to sql.Open() and sql.Ping() for more information.
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
*/
- (driverName: string): (DB)
+ select(...cols: string[]): (SelectQuery)
}
- interface DB {
+ interface MysqlBuilder {
/**
- * Clone makes a shallow copy of DB.
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
*/
- clone(): (DB)
+ model(model: {
+ }): (ModelQuery)
}
- interface DB {
+ interface MysqlBuilder {
/**
- * WithContext returns a new instance of DB associated with the given context.
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
*/
- withContext(ctx: context.Context): (DB)
+ quoteSimpleTableName(s: string): string
}
- interface DB {
+ interface MysqlBuilder {
/**
- * Context returns the context associated with the DB instance.
- * It returns nil if no context is associated.
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
*/
- context(): context.Context
+ quoteSimpleColumnName(s: string): string
}
- interface DB {
+ interface MysqlBuilder {
/**
- * DB returns the sql.DB instance encapsulated by dbx.DB.
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
*/
- db(): (sql.DB)
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
}
- interface DB {
+ interface MysqlBuilder {
/**
- * Close closes the database, releasing any open resources.
- * It is rare to Close a DB, as the DB handle is meant to be
- * long-lived and shared between many goroutines.
+ * RenameColumn creates a Query that can be used to rename a column in a table.
*/
- close(): void
+ renameColumn(table: string, oldName: string, newName: string): (Query)
}
- interface DB {
+ interface MysqlBuilder {
/**
- * Begin starts a transaction.
+ * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
*/
- begin(): (Tx)
+ dropPrimaryKey(table: string, name: string): (Query)
}
- interface DB {
+ interface MysqlBuilder {
/**
- * BeginTx starts a transaction with the given context and transaction options.
+ * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
*/
- beginTx(ctx: context.Context, opts: sql.TxOptions): (Tx)
+ dropForeignKey(table: string, name: string): (Query)
}
- interface DB {
+ /**
+ * OciBuilder is the builder for Oracle databases.
+ */
+ type _subfiTVV = BaseBuilder
+ interface OciBuilder extends _subfiTVV {
+ }
+ /**
+ * OciQueryBuilder is the query builder for Oracle databases.
+ */
+ type _subrSBRI = BaseQueryBuilder
+ interface OciQueryBuilder extends _subrSBRI {
+ }
+ interface newOciBuilder {
/**
- * Wrap encapsulates an existing transaction.
+ * NewOciBuilder creates a new OciBuilder instance.
*/
- wrap(sqlTx: sql.Tx): (Tx)
+ (db: DB, executor: Executor): Builder
}
- interface DB {
+ interface OciBuilder {
/**
- * Transactional starts a transaction and executes the given function.
- * If the function returns an error, the transaction will be rolled back.
- * Otherwise, the transaction will be committed.
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
*/
- transactional(f: (_arg0: Tx) => void): void
+ select(...cols: string[]): (SelectQuery)
}
- interface DB {
+ interface OciBuilder {
/**
- * TransactionalContext starts a transaction and executes the given function with the given context and transaction options.
- * If the function returns an error, the transaction will be rolled back.
- * Otherwise, the transaction will be committed.
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
*/
- transactionalContext(ctx: context.Context, opts: sql.TxOptions, f: (_arg0: Tx) => void): void
+ model(model: {
+ }): (ModelQuery)
}
- interface DB {
+ interface OciBuilder {
/**
- * DriverName returns the name of the DB driver.
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
*/
- driverName(): string
+ generatePlaceholder(i: number): string
}
- interface DB {
+ interface OciBuilder {
/**
- * QuoteTableName quotes the given table name appropriately.
- * If the table name contains DB schema prefix, it will be handled accordingly.
- * This method will do nothing if the table name is already quoted or if it contains parenthesis.
+ * QueryBuilder returns the query builder supporting the current DB.
*/
- quoteTableName(s: string): string
+ queryBuilder(): QueryBuilder
}
- interface DB {
+ interface OciBuilder {
/**
- * QuoteColumnName quotes the given column name appropriately.
- * If the table name contains table name prefix, it will be handled accordingly.
- * This method will do nothing if the column name is already quoted or if it contains parenthesis.
+ * DropIndex creates a Query that can be used to remove the named index from a table.
*/
- quoteColumnName(s: string): string
+ dropIndex(table: string, name: string): (Query)
}
- interface Errors {
+ interface OciBuilder {
/**
- * Error returns the error string of Errors.
+ * RenameTable creates a Query that can be used to rename a table.
*/
- error(): string
+ renameTable(oldName: string, newName: string): (Query)
}
- /**
- * Expression represents a DB expression that can be embedded in a SQL statement.
- */
- interface Expression {
- [key:string]: any;
+ interface OciBuilder {
/**
- * Build converts an expression into a SQL fragment.
- * If the expression contains binding parameters, they will be added to the given Params.
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
*/
- build(_arg0: DB, _arg1: Params): string
+ alterColumn(table: string, col: string, typ: string): (Query)
+ }
+ interface OciQueryBuilder {
+ /**
+ * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ */
+ buildOrderByAndLimit(sql: string, cols: Array, limit: number, offset: number): string
}
/**
- * HashExp represents a hash expression.
- *
- * A hash expression is a map whose keys are DB column names which need to be filtered according
- * to the corresponding values. For example, HashExp{"level": 2, "dept": 10} will generate
- * the SQL: "level"=2 AND "dept"=10.
- *
- * HashExp also handles nil values and slice values. For example, HashExp{"level": []interface{}{1, 2}, "dept": nil}
- * will generate: "level" IN (1, 2) AND "dept" IS NULL.
+ * PgsqlBuilder is the builder for PostgreSQL databases.
*/
- interface HashExp extends _TygojaDict{}
- interface newExp {
+ type _subtFJti = BaseBuilder
+ interface PgsqlBuilder extends _subtFJti {
+ }
+ interface newPgsqlBuilder {
/**
- * NewExp generates an expression with the specified SQL fragment and the optional binding parameters.
+ * NewPgsqlBuilder creates a new PgsqlBuilder instance.
*/
- (e: string, ...params: Params[]): Expression
+ (db: DB, executor: Executor): Builder
}
- interface not {
+ interface PgsqlBuilder {
/**
- * Not generates a NOT expression which prefixes "NOT" to the specified expression.
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
*/
- (e: Expression): Expression
+ select(...cols: string[]): (SelectQuery)
}
- interface and {
+ interface PgsqlBuilder {
/**
- * And generates an AND expression which concatenates the given expressions with "AND".
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
*/
- (...exps: Expression[]): Expression
+ model(model: {
+ }): (ModelQuery)
}
- interface or {
+ interface PgsqlBuilder {
/**
- * Or generates an OR expression which concatenates the given expressions with "OR".
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
*/
- (...exps: Expression[]): Expression
+ generatePlaceholder(i: number): string
}
- interface _in {
+ interface PgsqlBuilder {
/**
- * In generates an IN expression for the specified column and the list of allowed values.
- * If values is empty, a SQL "0=1" will be generated which represents a false expression.
+ * QueryBuilder returns the query builder supporting the current DB.
*/
- (col: string, ...values: {
- }[]): Expression
+ queryBuilder(): QueryBuilder
}
- interface notIn {
+ interface PgsqlBuilder {
/**
- * NotIn generates an NOT IN expression for the specified column and the list of disallowed values.
- * If values is empty, an empty string will be returned indicating a true expression.
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
*/
- (col: string, ...values: {
- }[]): Expression
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
}
- interface like {
+ interface PgsqlBuilder {
/**
- * Like generates a LIKE expression for the specified column and the possible strings that the column should be like.
- * If multiple values are present, the column should be like *all* of them. For example, Like("name", "key", "word")
- * will generate a SQL expression: "name" LIKE "%key%" AND "name" LIKE "%word%".
- *
- * By default, each value will be surrounded by "%" to enable partial matching. If a value contains special characters
- * such as "%", "\", "_", they will also be properly escaped.
- *
- * You may call Escape() and/or Match() to change the default behavior. For example, Like("name", "key").Match(false, true)
- * generates "name" LIKE "key%".
+ * DropIndex creates a Query that can be used to remove the named index from a table.
*/
- (col: string, ...values: string[]): (LikeExp)
+ dropIndex(table: string, name: string): (Query)
}
- interface notLike {
+ interface PgsqlBuilder {
/**
- * NotLike generates a NOT LIKE expression.
- * For example, NotLike("name", "key", "word") will generate a SQL expression:
- * "name" NOT LIKE "%key%" AND "name" NOT LIKE "%word%". Please see Like() for more details.
+ * RenameTable creates a Query that can be used to rename a table.
*/
- (col: string, ...values: string[]): (LikeExp)
+ renameTable(oldName: string, newName: string): (Query)
}
- interface orLike {
+ interface PgsqlBuilder {
/**
- * OrLike generates an OR LIKE expression.
- * This is similar to Like() except that the column should be like one of the possible values.
- * For example, OrLike("name", "key", "word") will generate a SQL expression:
- * "name" LIKE "%key%" OR "name" LIKE "%word%". Please see Like() for more details.
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
*/
- (col: string, ...values: string[]): (LikeExp)
+ alterColumn(table: string, col: string, typ: string): (Query)
}
- interface orNotLike {
+ /**
+ * SqliteBuilder is the builder for SQLite databases.
+ */
+ type _subrBNop = BaseBuilder
+ interface SqliteBuilder extends _subrBNop {
+ }
+ interface newSqliteBuilder {
/**
- * OrNotLike generates an OR NOT LIKE expression.
- * For example, OrNotLike("name", "key", "word") will generate a SQL expression:
- * "name" NOT LIKE "%key%" OR "name" NOT LIKE "%word%". Please see Like() for more details.
+ * NewSqliteBuilder creates a new SqliteBuilder instance.
*/
- (col: string, ...values: string[]): (LikeExp)
+ (db: DB, executor: Executor): Builder
}
- interface exists {
+ interface SqliteBuilder {
/**
- * Exists generates an EXISTS expression by prefixing "EXISTS" to the given expression.
+ * QueryBuilder returns the query builder supporting the current DB.
*/
- (exp: Expression): Expression
+ queryBuilder(): QueryBuilder
}
- interface notExists {
+ interface SqliteBuilder {
/**
- * NotExists generates an EXISTS expression by prefixing "NOT EXISTS" to the given expression.
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
*/
- (exp: Expression): Expression
+ select(...cols: string[]): (SelectQuery)
}
- interface between {
+ interface SqliteBuilder {
/**
- * Between generates a BETWEEN expression.
- * For example, Between("age", 10, 30) generates: "age" BETWEEN 10 AND 30
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
*/
- (col: string, from: {
- }): Expression
+ model(model: {
+ }): (ModelQuery)
}
- interface notBetween {
+ interface SqliteBuilder {
/**
- * NotBetween generates a NOT BETWEEN expression.
- * For example, NotBetween("age", 10, 30) generates: "age" NOT BETWEEN 10 AND 30
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
*/
- (col: string, from: {
- }): Expression
- }
- /**
- * Exp represents an expression with a SQL fragment and a list of optional binding parameters.
- */
- interface Exp {
+ quoteSimpleTableName(s: string): string
}
- interface Exp {
+ interface SqliteBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
*/
- build(db: DB, params: Params): string
+ quoteSimpleColumnName(s: string): string
}
- interface HashExp {
+ interface SqliteBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * DropIndex creates a Query that can be used to remove the named index from a table.
*/
- build(db: DB, params: Params): string
- }
- /**
- * NotExp represents an expression that should prefix "NOT" to a specified expression.
- */
- interface NotExp {
+ dropIndex(table: string, name: string): (Query)
}
- interface NotExp {
+ interface SqliteBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * TruncateTable creates a Query that can be used to truncate a table.
*/
- build(db: DB, params: Params): string
- }
- /**
- * AndOrExp represents an expression that concatenates multiple expressions using either "AND" or "OR".
- */
- interface AndOrExp {
+ truncateTable(table: string): (Query)
}
- interface AndOrExp {
+ interface SqliteBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * RenameTable creates a Query that can be used to rename a table.
*/
- build(db: DB, params: Params): string
- }
- /**
- * InExp represents an "IN" or "NOT IN" expression.
- */
- interface InExp {
+ renameTable(oldName: string, newName: string): (Query)
}
- interface InExp {
+ interface SqliteBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
*/
- build(db: DB, params: Params): string
+ alterColumn(table: string, col: string, typ: string): (Query)
}
- /**
- * LikeExp represents a variant of LIKE expressions.
- */
- interface LikeExp {
+ interface SqliteBuilder {
/**
- * Like stores the LIKE operator. It can be "LIKE", "NOT LIKE".
- * It may also be customized as something like "ILIKE".
+ * AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
+ * The "name" parameter specifies the name of the primary key constraint.
*/
- like: string
+ addPrimaryKey(table: string, name: string, ...cols: string[]): (Query)
}
- interface LikeExp {
+ interface SqliteBuilder {
/**
- * Escape specifies how a LIKE expression should be escaped.
- * Each string at position 2i represents a special character and the string at position 2i+1 is
- * the corresponding escaped version.
+ * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
*/
- escape(...chars: string[]): (LikeExp)
+ dropPrimaryKey(table: string, name: string): (Query)
}
- interface LikeExp {
+ interface SqliteBuilder {
/**
- * Match specifies whether to do wildcard matching on the left and/or right of given strings.
+ * AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
+ * The length of cols and refCols must be the same as they refer to the primary and referential columns.
+ * The optional "options" parameters will be appended to the SQL statement. They can be used to
+ * specify options such as "ON DELETE CASCADE".
*/
- match(left: boolean): (LikeExp)
+ addForeignKey(table: string, name: string, cols: Array, refCols: Array, refTable: string, ...options: string[]): (Query)
}
- interface LikeExp {
+ interface SqliteBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
*/
- build(db: DB, params: Params): string
+ dropForeignKey(table: string, name: string): (Query)
}
/**
- * ExistsExp represents an EXISTS or NOT EXISTS expression.
+ * StandardBuilder is the builder that is used by DB for an unknown driver.
*/
- interface ExistsExp {
+ type _subesQFA = BaseBuilder
+ interface StandardBuilder extends _subesQFA {
}
- interface ExistsExp {
+ interface newStandardBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * NewStandardBuilder creates a new StandardBuilder instance.
*/
- build(db: DB, params: Params): string
- }
- /**
- * BetweenExp represents a BETWEEN or a NOT BETWEEN expression.
- */
- interface BetweenExp {
+ (db: DB, executor: Executor): Builder
}
- interface BetweenExp {
+ interface StandardBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * QueryBuilder returns the query builder supporting the current DB.
*/
- build(db: DB, params: Params): string
+ queryBuilder(): QueryBuilder
}
- interface enclose {
+ interface StandardBuilder {
/**
- * Enclose surrounds the provided nonempty expression with parenthesis "()".
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
*/
- (exp: Expression): Expression
- }
- /**
- * EncloseExp represents a parenthesis enclosed expression.
- */
- interface EncloseExp {
+ select(...cols: string[]): (SelectQuery)
}
- interface EncloseExp {
+ interface StandardBuilder {
/**
- * Build converts an expression into a SQL fragment.
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
*/
- build(db: DB, params: Params): string
+ model(model: {
+ }): (ModelQuery)
}
/**
- * TableModel is the interface that should be implemented by models which have unconventional table names.
+ * LogFunc logs a message for each SQL statement being executed.
+ * This method takes one or multiple parameters. If a single parameter
+ * is provided, it will be treated as the log message. If multiple parameters
+ * are provided, they will be passed to fmt.Sprintf() to generate the log message.
*/
- interface TableModel {
- [key:string]: any;
- tableName(): string
- }
- /**
- * ModelQuery represents a query associated with a struct model.
- */
- interface ModelQuery {
- }
- interface newModelQuery {
- (model: {
- }, fieldMapFunc: FieldMapFunc, db: DB, builder: Builder): (ModelQuery)
- }
- interface ModelQuery {
- /**
- * Context returns the context associated with the query.
- */
- context(): context.Context
- }
- interface ModelQuery {
- /**
- * WithContext associates a context with the query.
- */
- withContext(ctx: context.Context): (ModelQuery)
- }
- interface ModelQuery {
- /**
- * Exclude excludes the specified struct fields from being inserted/updated into the DB table.
- */
- exclude(...attrs: string[]): (ModelQuery)
- }
- interface ModelQuery {
- /**
- * Insert inserts a row in the table using the struct model associated with this query.
- *
- * By default, it inserts *all* public fields into the table, including those nil or empty ones.
- * You may pass a list of the fields to this method to indicate that only those fields should be inserted.
- * You may also call Exclude to exclude some fields from being inserted.
- *
- * If a model has an empty primary key, it is considered auto-incremental and the corresponding struct
- * field will be filled with the generated primary key value after a successful insertion.
- */
- insert(...attrs: string[]): void
- }
- interface ModelQuery {
- /**
- * Update updates a row in the table using the struct model associated with this query.
- * The row being updated has the same primary key as specified by the model.
- *
- * By default, it updates *all* public fields in the table, including those nil or empty ones.
- * You may pass a list of the fields to this method to indicate that only those fields should be updated.
- * You may also call Exclude to exclude some fields from being updated.
- */
- update(...attrs: string[]): void
- }
- interface ModelQuery {
- /**
- * Delete deletes a row in the table using the primary key specified by the struct model associated with this query.
- */
- delete(): void
- }
- /**
- * ExecHookFunc executes before op allowing custom handling like auto fail/retry.
- */
- interface ExecHookFunc {(q: Query, op: () => void): void }
+ interface LogFunc {(format: string, ...a: {
+ }[]): void }
/**
- * OneHookFunc executes right before the query populate the row result from One() call (aka. op).
+ * PerfFunc is called when a query finishes execution.
+ * The query execution time is passed to this function so that the DB performance
+ * can be profiled. The "ns" parameter gives the number of nanoseconds that the
+ * SQL statement takes to execute, while the "execute" parameter indicates whether
+ * the SQL statement is executed or queried (usually SELECT statements).
*/
- interface OneHookFunc {(q: Query, a: {
- }, op: (b: {
- }) => void): void }
+ interface PerfFunc {(ns: number, sql: string, execute: boolean): void }
/**
- * AllHookFunc executes right before the query populate the row result from All() call (aka. op).
+ * QueryLogFunc is called each time when performing a SQL query.
+ * The "t" parameter gives the time that the SQL statement takes to execute,
+ * while rows and err are the result of the query.
*/
- interface AllHookFunc {(q: Query, sliceA: {
- }, op: (sliceB: {
- }) => void): void }
+ interface QueryLogFunc {(ctx: context.Context, t: time.Duration, sql: string, rows: sql.Rows, err: Error): void }
/**
- * Params represents a list of parameter values to be bound to a SQL statement.
- * The map keys are the parameter names while the map values are the corresponding parameter values.
+ * ExecLogFunc is called each time when a SQL statement is executed.
+ * The "t" parameter gives the time that the SQL statement takes to execute,
+ * while result and err refer to the result of the execution.
*/
- interface Params extends _TygojaDict{}
+ interface ExecLogFunc {(ctx: context.Context, t: time.Duration, sql: string, result: sql.Result, err: Error): void }
/**
- * Executor prepares, executes, or queries a SQL statement.
+ * BuilderFunc creates a Builder instance using the given DB instance and Executor.
*/
- interface Executor {
- [key:string]: any;
- /**
- * Exec executes a SQL statement
- */
- exec(query: string, ...args: {
- }[]): sql.Result
- /**
- * ExecContext executes a SQL statement with the given context
- */
- execContext(ctx: context.Context, query: string, ...args: {
- }[]): sql.Result
- /**
- * Query queries a SQL statement
- */
- query(query: string, ...args: {
- }[]): (sql.Rows)
- /**
- * QueryContext queries a SQL statement with the given context
- */
- queryContext(ctx: context.Context, query: string, ...args: {
- }[]): (sql.Rows)
- /**
- * Prepare creates a prepared statement
- */
- prepare(query: string): (sql.Stmt)
- }
+ interface BuilderFunc {(_arg0: DB, _arg1: Executor): Builder }
/**
- * Query represents a SQL statement to be executed.
+ * DB enhances sql.DB by providing a set of DB-agnostic query building methods.
+ * DB allows easier query building and population of data into Go variables.
*/
- interface Query {
+ type _subkWabA = Builder
+ interface DB extends _subkWabA {
/**
- * FieldMapper maps struct field names to DB column names.
+ * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc.
*/
fieldMapper: FieldMapFunc
/**
- * LastError contains the last error (if any) of the query.
- * LastError is cleared by Execute(), Row(), Rows(), One(), and All().
+ * TableMapper maps structs to table names. Defaults to GetTableName.
*/
- lastError: Error
+ tableMapper: TableMapFunc
/**
- * LogFunc is used to log the SQL statement being executed.
+ * LogFunc logs the SQL statements being executed. Defaults to nil, meaning no logging.
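+ *
+ * Illustrative sketch:
+ *
+ * ```
+ * db.LogFunc = log.Printf
+ * ```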
*/
logFunc: LogFunc
/**
- * PerfFunc is used to log the SQL execution time. It is ignored if nil.
+ * PerfFunc logs the SQL execution time. Defaults to nil, meaning no performance profiling.
* Deprecated: Please use QueryLogFunc and ExecLogFunc instead.
*/
perfFunc: PerfFunc
@@ -4189,1140 +4201,1313 @@ namespace dbx {
*/
execLogFunc: ExecLogFunc
}
- interface newQuery {
+ /**
+ * Errors represents a list of errors.
+ */
+ interface Errors extends Array{}
+ interface newFromDB {
/**
- * NewQuery creates a new Query with the given SQL statement.
+ * NewFromDB encapsulates an existing database connection.
*/
- (db: DB, executor: Executor, sql: string): (Query)
+ (sqlDB: sql.DB, driverName: string): (DB)
}
- interface Query {
+ interface open {
/**
- * SQL returns the original SQL used to create the query.
- * The actual SQL (RawSQL) being executed is obtained by replacing the named
- * parameter placeholders with anonymous ones.
+ * Open opens a database specified by a driver name and data source name (DSN).
+ * Note that Open does not check if DSN is specified correctly. It doesn't try to establish a DB connection either.
+ * Please refer to sql.Open() for more information.
*/
- sql(): string
+ (driverName: string, dsn: string): (DB)
}
- interface Query {
+ interface mustOpen {
/**
- * Context returns the context associated with the query.
+ * MustOpen opens a database and establishes a connection to it.
+ * Please refer to sql.Open() and sql.Ping() for more information.
*/
- context(): context.Context
+ (driverName: string, dsn: string): (DB)
}
- interface Query {
+ interface DB {
/**
- * WithContext associates a context with the query.
+ * Clone makes a shallow copy of DB.
*/
- withContext(ctx: context.Context): (Query)
+ clone(): (DB)
}
- interface Query {
+ interface DB {
/**
- * WithExecHook associates the provided exec hook function with the query.
- *
- * It is called for every Query resolver (Execute(), One(), All(), Row(), Column()),
- * allowing you to implement auto fail/retry or any other additional handling.
+ * WithContext returns a new instance of DB associated with the given context.
*/
- withExecHook(fn: ExecHookFunc): (Query)
+ withContext(ctx: context.Context): (DB)
}
- interface Query {
+ interface DB {
/**
- * WithOneHook associates the provided hook function with the query,
- * called on q.One(), allowing you to implement custom struct scan based
- * on the One() argument and/or result.
+ * Context returns the context associated with the DB instance.
+ * It returns nil if no context is associated.
*/
- withOneHook(fn: OneHookFunc): (Query)
+ context(): context.Context
}
- interface Query {
+ interface DB {
/**
- * WithOneHook associates the provided hook function with the query,
- * called on q.All(), allowing you to implement custom slice scan based
- * on the All() argument and/or result.
+ * DB returns the sql.DB instance encapsulated by dbx.DB.
*/
- withAllHook(fn: AllHookFunc): (Query)
+ db(): (sql.DB)
}
- interface Query {
+ interface DB {
/**
- * Params returns the parameters to be bound to the SQL statement represented by this query.
+ * Close closes the database, releasing any open resources.
+ * It is rare to Close a DB, as the DB handle is meant to be
+ * long-lived and shared between many goroutines.
*/
- params(): Params
+ close(): void
}
- interface Query {
+ interface DB {
/**
- * Prepare creates a prepared statement for later queries or executions.
- * Close() should be called after finishing all queries.
+ * Begin starts a transaction.
*/
- prepare(): (Query)
+ begin(): (Tx)
}
- interface Query {
+ interface DB {
/**
- * Close closes the underlying prepared statement.
- * Close does nothing if the query has not been prepared before.
+ * BeginTx starts a transaction with the given context and transaction options.
*/
- close(): void
+ beginTx(ctx: context.Context, opts: sql.TxOptions): (Tx)
}
- interface Query {
+ interface DB {
/**
- * Bind sets the parameters that should be bound to the SQL statement.
- * The parameter placeholders in the SQL statement are in the format of "{:ParamName}".
+ * Wrap encapsulates an existing transaction.
*/
- bind(params: Params): (Query)
+ wrap(sqlTx: sql.Tx): (Tx)
}
- interface Query {
+ interface DB {
/**
- * Execute executes the SQL statement without retrieving data.
+ * Transactional starts a transaction and executes the given function.
+ * If the function returns an error, the transaction will be rolled back.
+ * Otherwise, the transaction will be committed.
*/
- execute(): sql.Result
+ transactional(f: (_arg0: Tx) => void): void
}
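+ /**
+ * Illustrative sketch (not part of the generated declarations), assuming `db`
+ * refers to a dbx.DB instance (for example the one behind $app.dao().db() in
+ * the JSVM) and the Builder.newQuery()/bind()/execute() methods declared
+ * elsewhere in this file. Throwing inside the callback is expected to roll the
+ * transaction back, mirroring the error behavior described above.
+ *
+ * ```
+ * db.transactional((tx) => {
+ *     tx.newQuery("DELETE FROM logs WHERE created < {:d}")
+ *         .bind({ d: "2024-01-01 00:00:00" })
+ *         .execute()
+ * })
+ * ```
+ */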
- interface Query {
+ interface DB {
/**
- * One executes the SQL statement and populates the first row of the result into a struct or NullStringMap.
- * Refer to Rows.ScanStruct() and Rows.ScanMap() for more details on how to specify
- * the variable to be populated.
- * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
+ * TransactionalContext starts a transaction and executes the given function with the given context and transaction options.
+ * If the function returns an error, the transaction will be rolled back.
+ * Otherwise, the transaction will be committed.
*/
- one(a: {
- }): void
+ transactionalContext(ctx: context.Context, opts: sql.TxOptions, f: (_arg0: Tx) => void): void
}
- interface Query {
+ interface DB {
/**
- * All executes the SQL statement and populates all the resulting rows into a slice of struct or NullStringMap.
- * The slice must be given as a pointer. Each slice element must be either a struct or a NullStringMap.
- * Refer to Rows.ScanStruct() and Rows.ScanMap() for more details on how each slice element can be.
- * If the query returns no row, the slice will be an empty slice (not nil).
+ * DriverName returns the name of the DB driver.
*/
- all(slice: {
- }): void
+ driverName(): string
}
- interface Query {
+ interface DB {
/**
- * Row executes the SQL statement and populates the first row of the result into a list of variables.
- * Note that the number of the variables should match to that of the columns in the query result.
- * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
- */
- row(...a: {
- }[]): void
+ * QuoteTableName quotes the given table name appropriately.
+ * If the table name contains a DB schema prefix, it will be handled accordingly.
+ * This method will do nothing if the table name is already quoted or if it contains parentheses.
+ */
+ quoteTableName(s: string): string
}
- interface Query {
+ interface DB {
/**
- * Column executes the SQL statement and populates the first column of the result into a slice.
- * Note that the parameter must be a pointer to a slice.
+ * QuoteColumnName quotes the given column name appropriately.
+ * If the column name contains a table name prefix, it will be handled accordingly.
+ * This method will do nothing if the column name is already quoted or if it contains parentheses.
*/
- column(a: {
- }): void
+ quoteColumnName(s: string): string
}
- interface Query {
+ interface Errors {
/**
- * Rows executes the SQL statement and returns a Rows object to allow retrieving data row by row.
+ * Error returns the error string of Errors.
*/
- rows(): (Rows)
+ error(): string
}
/**
- * QueryBuilder builds different clauses for a SELECT SQL statement.
+ * Expression represents a DB expression that can be embedded in a SQL statement.
*/
- interface QueryBuilder {
+ interface Expression {
[key:string]: any;
/**
- * BuildSelect generates a SELECT clause from the given selected column names.
- */
- buildSelect(cols: Array, distinct: boolean, option: string): string
- /**
- * BuildFrom generates a FROM clause from the given tables.
- */
- buildFrom(tables: Array): string
- /**
- * BuildGroupBy generates a GROUP BY clause from the given group-by columns.
- */
- buildGroupBy(cols: Array): string
- /**
- * BuildJoin generates a JOIN clause from the given join information.
+ * Build converts an expression into a SQL fragment.
+ * If the expression contains binding parameters, they will be added to the given Params.
*/
- buildJoin(_arg0: Array, _arg1: Params): string
+ build(_arg0: DB, _arg1: Params): string
+ }
+ /**
+ * HashExp represents a hash expression.
+ *
+ * A hash expression is a map whose keys are DB column names which need to be filtered according
+ * to the corresponding values. For example, HashExp{"level": 2, "dept": 10} will generate
+ * the SQL: "level"=2 AND "dept"=10.
+ *
+ * HashExp also handles nil values and slice values. For example, HashExp{"level": []interface{}{1, 2}, "dept": nil}
+ * will generate: "level" IN (1, 2) AND "dept" IS NULL.
+ */
+ interface HashExp extends _TygojaDict{}
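+ /**
+ * Illustrative sketch (not part of the generated declarations), assuming the
+ * $dbx.hashExp() helper exposed by the JSVM bindings:
+ *
+ * ```
+ * // builds roughly to: "status"={bound param} AND "deleted" IS NULL
+ * const cond = $dbx.hashExp({ status: "active", deleted: null })
+ * ```
+ */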
+ interface newExp {
/**
- * BuildWhere generates a WHERE clause from the given expression.
+ * NewExp generates an expression with the specified SQL fragment and the optional binding parameters.
*/
- buildWhere(_arg0: Expression, _arg1: Params): string
+ (e: string, ...params: Params[]): Expression
+ }
+ interface not {
/**
- * BuildHaving generates a HAVING clause from the given expression.
+ * Not generates a NOT expression which prefixes "NOT" to the specified expression.
*/
- buildHaving(_arg0: Expression, _arg1: Params): string
+ (e: Expression): Expression
+ }
+ interface and {
/**
- * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ * And generates an AND expression which concatenates the given expressions with "AND".
*/
- buildOrderByAndLimit(_arg0: string, _arg1: Array, _arg2: number, _arg3: number): string
+ (...exps: Expression[]): Expression
+ }
+ interface or {
/**
- * BuildUnion generates a UNION clause from the given union information.
+ * Or generates an OR expression which concatenates the given expressions with "OR".
*/
- buildUnion(_arg0: Array, _arg1: Params): string
- }
- /**
- * BaseQueryBuilder provides a basic implementation of QueryBuilder.
- */
- interface BaseQueryBuilder {
+ (...exps: Expression[]): Expression
}
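+ /**
+ * Illustrative sketch (not part of the generated declarations) of composing
+ * expressions, assuming the $dbx.exp(), $dbx.not() and $dbx.and() helpers
+ * exposed by the JSVM bindings:
+ *
+ * ```
+ * const cond = $dbx.and(
+ *     $dbx.exp("status = {:status}", { status: "active" }),
+ *     $dbx.not($dbx.hashExp({ deleted: true }))
+ * )
+ * ```
+ */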
- interface newBaseQueryBuilder {
+ interface _in {
/**
- * NewBaseQueryBuilder creates a new BaseQueryBuilder instance.
+ * In generates an IN expression for the specified column and the list of allowed values.
+ * If values is empty, a SQL "0=1" will be generated which represents a false expression.
*/
- (db: DB): (BaseQueryBuilder)
+ (col: string, ...values: {
+ }[]): Expression
}
- interface BaseQueryBuilder {
+ interface notIn {
/**
- * DB returns the DB instance associated with the query builder.
+ * NotIn generates a NOT IN expression for the specified column and the list of disallowed values.
+ * If values is empty, an empty string will be returned indicating a true expression.
*/
- db(): (DB)
+ (col: string, ...values: {
+ }[]): Expression
}
- interface BaseQueryBuilder {
+ interface like {
/**
- * BuildSelect generates a SELECT clause from the given selected column names.
+ * Like generates a LIKE expression for the specified column and the possible strings that the column should be like.
+ * If multiple values are present, the column should be like *all* of them. For example, Like("name", "key", "word")
+ * will generate a SQL expression: "name" LIKE "%key%" AND "name" LIKE "%word%".
+ *
+ * By default, each value will be surrounded by "%" to enable partial matching. If a value contains special characters
+ * such as "%", "\", "_", they will also be properly escaped.
+ *
+ * You may call Escape() and/or Match() to change the default behavior. For example, Like("name", "key").Match(false, true)
+ * generates "name" LIKE "key%".
*/
- buildSelect(cols: Array, distinct: boolean, option: string): string
+ (col: string, ...values: string[]): (LikeExp)
}
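+ /**
+ * Illustrative sketch (not part of the generated declarations), assuming the
+ * $dbx.like() helper exposed by the JSVM bindings:
+ *
+ * ```
+ * $dbx.like("name", "john")                    // "name" LIKE '%john%'
+ * $dbx.like("name", "john").match(false, true) // "name" LIKE 'john%'
+ * ```
+ */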
- interface BaseQueryBuilder {
+ interface notLike {
/**
- * BuildFrom generates a FROM clause from the given tables.
+ * NotLike generates a NOT LIKE expression.
+ * For example, NotLike("name", "key", "word") will generate a SQL expression:
+ * "name" NOT LIKE "%key%" AND "name" NOT LIKE "%word%". Please see Like() for more details.
*/
- buildFrom(tables: Array): string
+ (col: string, ...values: string[]): (LikeExp)
}
- interface BaseQueryBuilder {
+ interface orLike {
/**
- * BuildJoin generates a JOIN clause from the given join information.
+ * OrLike generates an OR LIKE expression.
+ * This is similar to Like() except that the column should be like one of the possible values.
+ * For example, OrLike("name", "key", "word") will generate a SQL expression:
+ * "name" LIKE "%key%" OR "name" LIKE "%word%". Please see Like() for more details.
*/
- buildJoin(joins: Array, params: Params): string
+ (col: string, ...values: string[]): (LikeExp)
}
- interface BaseQueryBuilder {
+ interface orNotLike {
/**
- * BuildWhere generates a WHERE clause from the given expression.
+ * OrNotLike generates an OR NOT LIKE expression.
+ * For example, OrNotLike("name", "key", "word") will generate a SQL expression:
+ * "name" NOT LIKE "%key%" OR "name" NOT LIKE "%word%". Please see Like() for more details.
*/
- buildWhere(e: Expression, params: Params): string
+ (col: string, ...values: string[]): (LikeExp)
}
- interface BaseQueryBuilder {
+ interface exists {
/**
- * BuildHaving generates a HAVING clause from the given expression.
+ * Exists generates an EXISTS expression by prefixing "EXISTS" to the given expression.
*/
- buildHaving(e: Expression, params: Params): string
+ (exp: Expression): Expression
}
- interface BaseQueryBuilder {
+ interface notExists {
/**
- * BuildGroupBy generates a GROUP BY clause from the given group-by columns.
+ * NotExists generates a NOT EXISTS expression by prefixing "NOT EXISTS" to the given expression.
*/
- buildGroupBy(cols: Array): string
+ (exp: Expression): Expression
}
- interface BaseQueryBuilder {
+ interface between {
/**
- * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ * Between generates a BETWEEN expression.
+ * For example, Between("age", 10, 30) generates: "age" BETWEEN 10 AND 30
*/
- buildOrderByAndLimit(sql: string, cols: Array, limit: number, offset: number): string
+ (col: string, from: {
+ }, to: {
+ }): Expression
}
- interface BaseQueryBuilder {
+ interface notBetween {
/**
- * BuildUnion generates a UNION clause from the given union information.
+ * NotBetween generates a NOT BETWEEN expression.
+ * For example, NotBetween("age", 10, 30) generates: "age" NOT BETWEEN 10 AND 30
*/
- buildUnion(unions: Array, params: Params): string
+ (col: string, from: {
+ }, to: {
+ }): Expression
}
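+ /**
+ * Illustrative sketch (not part of the generated declarations), assuming the
+ * $dbx.between() and $dbx.notBetween() helpers exposed by the JSVM bindings:
+ *
+ * ```
+ * $dbx.between("age", 10, 30)    // "age" BETWEEN 10 AND 30
+ * $dbx.notBetween("age", 10, 30) // "age" NOT BETWEEN 10 AND 30
+ * ```
+ */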
- interface BaseQueryBuilder {
+ /**
+ * Exp represents an expression with a SQL fragment and a list of optional binding parameters.
+ */
+ interface Exp {
+ }
+ interface Exp {
/**
- * BuildOrderBy generates the ORDER BY clause.
+ * Build converts an expression into a SQL fragment.
*/
- buildOrderBy(cols: Array): string
+ build(db: DB, params: Params): string
}
- interface BaseQueryBuilder {
+ interface HashExp {
/**
- * BuildLimit generates the LIMIT clause.
+ * Build converts an expression into a SQL fragment.
*/
- buildLimit(limit: number, offset: number): string
+ build(db: DB, params: Params): string
}
/**
- * VarTypeError indicates a variable type error when trying to populating a variable with DB result.
+ * NotExp represents an expression that should prefix "NOT" to a specified expression.
*/
- interface VarTypeError extends String{}
- interface VarTypeError {
+ interface NotExp {
+ }
+ interface NotExp {
/**
- * Error returns the error message.
+ * Build converts an expression into a SQL fragment.
*/
- error(): string
+ build(db: DB, params: Params): string
}
/**
- * NullStringMap is a map of sql.NullString that can be used to hold DB query result.
- * The map keys correspond to the DB column names, while the map values are their corresponding column values.
- */
- interface NullStringMap extends _TygojaDict{}
- /**
- * Rows enhances sql.Rows by providing additional data query methods.
- * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row.
+ * AndOrExp represents an expression that concatenates multiple expressions using either "AND" or "OR".
*/
- type _subkUEGQ = sql.Rows
- interface Rows extends _subkUEGQ {
+ interface AndOrExp {
}
- interface Rows {
+ interface AndOrExp {
/**
- * ScanMap populates the current row of data into a NullStringMap.
- * Note that the NullStringMap must not be nil, or it will panic.
- * The NullStringMap will be populated using column names as keys and their values as
- * the corresponding element values.
+ * Build converts an expression into a SQL fragment.
*/
- scanMap(a: NullStringMap): void
+ build(db: DB, params: Params): string
}
- interface Rows {
+ /**
+ * InExp represents an "IN" or "NOT IN" expression.
+ */
+ interface InExp {
+ }
+ interface InExp {
/**
- * ScanStruct populates the current row of data into a struct.
- * The struct must be given as a pointer.
- *
- * ScanStruct associates struct fields with DB table columns through a field mapping function.
- * It populates a struct field with the data of its associated column.
- * Note that only exported struct fields will be populated.
- *
- * By default, DefaultFieldMapFunc() is used to map struct fields to table columns.
- * This function separates each word in a field name with a underscore and turns every letter into lower case.
- * For example, "LastName" is mapped to "last_name", "MyID" is mapped to "my_id", and so on.
- * To change the default behavior, set DB.FieldMapper with your custom mapping function.
- * You may also set Query.FieldMapper to change the behavior for particular queries.
+ * Build converts an expression into a SQL fragment.
*/
- scanStruct(a: {
- }): void
+ build(db: DB, params: Params): string
}
/**
- * BuildHookFunc defines a callback function that is executed on Query creation.
- */
- interface BuildHookFunc {(q: Query): void }
- /**
- * SelectQuery represents a DB-agnostic SELECT query.
- * It can be built into a DB-specific query by calling the Build() method.
+ * LikeExp represents a variant of LIKE expressions.
*/
- interface SelectQuery {
+ interface LikeExp {
/**
- * FieldMapper maps struct field names to DB column names.
+ * Like stores the LIKE operator. It can be "LIKE", "NOT LIKE".
+ * It may also be customized as something like "ILIKE".
*/
- fieldMapper: FieldMapFunc
+ like: string
+ }
+ interface LikeExp {
/**
- * TableMapper maps structs to DB table names.
+ * Escape specifies how a LIKE expression should be escaped.
+ * Each string at position 2i represents a special character and the string at position 2i+1 is
+ * the corresponding escaped version.
*/
- tableMapper: TableMapFunc
+ escape(...chars: string[]): (LikeExp)
+ }
+ interface LikeExp {
+ /**
+ * Match specifies whether to do wildcard matching on the left and/or right of given strings.
+ */
+ match(left: boolean, right: boolean): (LikeExp)
+ }
+ interface LikeExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
}
/**
- * JoinInfo contains the specification for a JOIN clause.
+ * ExistsExp represents an EXISTS or NOT EXISTS expression.
*/
- interface JoinInfo {
- join: string
- table: string
- on: Expression
+ interface ExistsExp {
+ }
+ interface ExistsExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
}
/**
- * UnionInfo contains the specification for a UNION clause.
+ * BetweenExp represents a BETWEEN or a NOT BETWEEN expression.
*/
- interface UnionInfo {
- all: boolean
- query?: Query
+ interface BetweenExp {
}
- interface newSelectQuery {
+ interface BetweenExp {
/**
- * NewSelectQuery creates a new SelectQuery instance.
+ * Build converts an expression into a SQL fragment.
*/
- (builder: Builder, db: DB): (SelectQuery)
+ build(db: DB, params: Params): string
}
- interface SelectQuery {
+ interface enclose {
/**
- * WithBuildHook runs the provided hook function with the query created on Build().
+ * Enclose surrounds the provided nonempty expression with parentheses "()".
*/
- withBuildHook(fn: BuildHookFunc): (SelectQuery)
+ (exp: Expression): Expression
}
- interface SelectQuery {
+ /**
+ * EncloseExp represents a parenthesis enclosed expression.
+ */
+ interface EncloseExp {
+ }
+ interface EncloseExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * TableModel is the interface that should be implemented by models which have unconventional table names.
+ */
+ interface TableModel {
+ [key:string]: any;
+ tableName(): string
+ }
+ /**
+ * ModelQuery represents a query associated with a struct model.
+ */
+ interface ModelQuery {
+ }
+ interface newModelQuery {
+ (model: {
+ }, fieldMapFunc: FieldMapFunc, db: DB, builder: Builder): (ModelQuery)
+ }
+ interface ModelQuery {
/**
* Context returns the context associated with the query.
*/
context(): context.Context
}
- interface SelectQuery {
+ interface ModelQuery {
/**
* WithContext associates a context with the query.
*/
- withContext(ctx: context.Context): (SelectQuery)
+ withContext(ctx: context.Context): (ModelQuery)
}
- interface SelectQuery {
+ interface ModelQuery {
/**
- * Select specifies the columns to be selected.
- * Column names will be automatically quoted.
+ * Exclude excludes the specified struct fields from being inserted/updated into the DB table.
*/
- select(...cols: string[]): (SelectQuery)
+ exclude(...attrs: string[]): (ModelQuery)
}
- interface SelectQuery {
+ interface ModelQuery {
/**
- * AndSelect adds additional columns to be selected.
- * Column names will be automatically quoted.
+ * Insert inserts a row in the table using the struct model associated with this query.
+ *
+ * By default, it inserts *all* public fields into the table, including those nil or empty ones.
+ * You may pass a list of the fields to this method to indicate that only those fields should be inserted.
+ * You may also call Exclude to exclude some fields from being inserted.
+ *
+ * If a model has an empty primary key, it is considered auto-incremental and the corresponding struct
+ * field will be filled with the generated primary key value after a successful insertion.
*/
- andSelect(...cols: string[]): (SelectQuery)
+ insert(...attrs: string[]): void
}
- interface SelectQuery {
+ interface ModelQuery {
/**
- * Distinct specifies whether to select columns distinctively.
- * By default, distinct is false.
+ * Update updates a row in the table using the struct model associated with this query.
+ * The row being updated has the same primary key as specified by the model.
+ *
+ * By default, it updates *all* public fields in the table, including those nil or empty ones.
+ * You may pass a list of the fields to this method to indicate that only those fields should be updated.
+ * You may also call Exclude to exclude some fields from being updated.
*/
- distinct(v: boolean): (SelectQuery)
+ update(...attrs: string[]): void
}
- interface SelectQuery {
+ interface ModelQuery {
/**
- * SelectOption specifies additional option that should be append to "SELECT".
+ * Delete deletes a row in the table using the primary key specified by the struct model associated with this query.
*/
- selectOption(option: string): (SelectQuery)
+ delete(): void
}
- interface SelectQuery {
+ /**
+ * ExecHookFunc executes before op allowing custom handling like auto fail/retry.
+ */
+ interface ExecHookFunc {(q: Query, op: () => void): void }
+ /**
+ * OneHookFunc executes right before the query populates the row result from the One() call (aka. op).
+ */
+ interface OneHookFunc {(q: Query, a: {
+ }, op: (b: {
+ }) => void): void }
+ /**
+ * AllHookFunc executes right before the query populates the row result from the All() call (aka. op).
+ */
+ interface AllHookFunc {(q: Query, sliceA: {
+ }, op: (sliceB: {
+ }) => void): void }
+ /**
+ * Params represents a list of parameter values to be bound to a SQL statement.
+ * The map keys are the parameter names while the map values are the corresponding parameter values.
+ */
+ interface Params extends _TygojaDict{}
+ /**
+ * Executor prepares, executes, or queries a SQL statement.
+ */
+ interface Executor {
+ [key:string]: any;
/**
- * From specifies which tables to select from.
- * Table names will be automatically quoted.
+ * Exec executes a SQL statement
*/
- from(...tables: string[]): (SelectQuery)
- }
- interface SelectQuery {
+ exec(query: string, ...args: {
+ }[]): sql.Result
/**
- * Where specifies the WHERE condition.
+ * ExecContext executes a SQL statement with the given context
*/
- where(e: Expression): (SelectQuery)
- }
- interface SelectQuery {
+ execContext(ctx: context.Context, query: string, ...args: {
+ }[]): sql.Result
/**
- * AndWhere concatenates a new WHERE condition with the existing one (if any) using "AND".
+ * Query queries a SQL statement
*/
- andWhere(e: Expression): (SelectQuery)
- }
- interface SelectQuery {
+ query(query: string, ...args: {
+ }[]): (sql.Rows)
/**
- * OrWhere concatenates a new WHERE condition with the existing one (if any) using "OR".
+ * QueryContext queries a SQL statement with the given context
*/
- orWhere(e: Expression): (SelectQuery)
- }
- interface SelectQuery {
+ queryContext(ctx: context.Context, query: string, ...args: {
+ }[]): (sql.Rows)
/**
- * Join specifies a JOIN clause.
- * The "typ" parameter specifies the JOIN type (e.g. "INNER JOIN", "LEFT JOIN").
+ * Prepare creates a prepared statement
*/
- join(typ: string, table: string, on: Expression): (SelectQuery)
+ prepare(query: string): (sql.Stmt)
}
- interface SelectQuery {
+ /**
+ * Query represents a SQL statement to be executed.
+ */
+ interface Query {
/**
- * InnerJoin specifies an INNER JOIN clause.
- * This is a shortcut method for Join.
+ * FieldMapper maps struct field names to DB column names.
*/
- innerJoin(table: string, on: Expression): (SelectQuery)
- }
- interface SelectQuery {
+ fieldMapper: FieldMapFunc
/**
- * LeftJoin specifies a LEFT JOIN clause.
- * This is a shortcut method for Join.
+ * LastError contains the last error (if any) of the query.
+ * LastError is cleared by Execute(), Row(), Rows(), One(), and All().
*/
- leftJoin(table: string, on: Expression): (SelectQuery)
- }
- interface SelectQuery {
+ lastError: Error
/**
- * RightJoin specifies a RIGHT JOIN clause.
- * This is a shortcut method for Join.
+ * LogFunc is used to log the SQL statement being executed.
*/
- rightJoin(table: string, on: Expression): (SelectQuery)
- }
- interface SelectQuery {
+ logFunc: LogFunc
/**
- * OrderBy specifies the ORDER BY clause.
- * Column names will be properly quoted. A column name can contain "ASC" or "DESC" to indicate its ordering direction.
+ * PerfFunc is used to log the SQL execution time. It is ignored if nil.
+ * Deprecated: Please use QueryLogFunc and ExecLogFunc instead.
*/
- orderBy(...cols: string[]): (SelectQuery)
- }
- interface SelectQuery {
+ perfFunc: PerfFunc
/**
- * AndOrderBy appends additional columns to the existing ORDER BY clause.
- * Column names will be properly quoted. A column name can contain "ASC" or "DESC" to indicate its ordering direction.
+ * QueryLogFunc is called each time a SQL query that returns data is performed.
*/
- andOrderBy(...cols: string[]): (SelectQuery)
- }
- interface SelectQuery {
+ queryLogFunc: QueryLogFunc
/**
- * GroupBy specifies the GROUP BY clause.
- * Column names will be properly quoted.
+ * ExecLogFunc is called each time a SQL statement is executed.
*/
- groupBy(...cols: string[]): (SelectQuery)
+ execLogFunc: ExecLogFunc
}
- interface SelectQuery {
+ interface newQuery {
/**
- * AndGroupBy appends additional columns to the existing GROUP BY clause.
- * Column names will be properly quoted.
+ * NewQuery creates a new Query with the given SQL statement.
*/
- andGroupBy(...cols: string[]): (SelectQuery)
+ (db: DB, executor: Executor, sql: string): (Query)
}
- interface SelectQuery {
+ interface Query {
/**
- * Having specifies the HAVING clause.
+ * SQL returns the original SQL used to create the query.
+ * The actual SQL (RawSQL) being executed is obtained by replacing the named
+ * parameter placeholders with anonymous ones.
*/
- having(e: Expression): (SelectQuery)
+ sql(): string
}
- interface SelectQuery {
+ interface Query {
/**
- * AndHaving concatenates a new HAVING condition with the existing one (if any) using "AND".
+ * Context returns the context associated with the query.
*/
- andHaving(e: Expression): (SelectQuery)
+ context(): context.Context
}
- interface SelectQuery {
+ interface Query {
/**
- * OrHaving concatenates a new HAVING condition with the existing one (if any) using "OR".
+ * WithContext associates a context with the query.
*/
- orHaving(e: Expression): (SelectQuery)
+ withContext(ctx: context.Context): (Query)
}
- interface SelectQuery {
+ interface Query {
/**
- * Union specifies a UNION clause.
- */
- union(q: Query): (SelectQuery)
- }
- interface SelectQuery {
- /**
- * UnionAll specifies a UNION ALL clause.
+ * WithExecHook associates the provided exec hook function with the query.
+ *
+ * It is called for every Query resolver (Execute(), One(), All(), Row(), Column()),
+ * allowing you to implement auto fail/retry or any other additional handling.
*/
- unionAll(q: Query): (SelectQuery)
+ withExecHook(fn: ExecHookFunc): (Query)
}
- interface SelectQuery {
+ interface Query {
/**
- * Limit specifies the LIMIT clause.
- * A negative limit means no limit.
+ * WithOneHook associates the provided hook function with the query,
+ * called on q.One(), allowing you to implement custom struct scan based
+ * on the One() argument and/or result.
*/
- limit(limit: number): (SelectQuery)
+ withOneHook(fn: OneHookFunc): (Query)
}
- interface SelectQuery {
+ interface Query {
/**
- * Offset specifies the OFFSET clause.
- * A negative offset means no offset.
+ * WithOneHook associates the provided hook function with the query,
+ * called on q.All(), allowing you to implement custom slice scan based
+ * on the All() argument and/or result.
*/
- offset(offset: number): (SelectQuery)
+ withAllHook(fn: AllHookFunc): (Query)
}
- interface SelectQuery {
+ interface Query {
/**
- * Bind specifies the parameter values to be bound to the query.
+ * Params returns the parameters to be bound to the SQL statement represented by this query.
*/
- bind(params: Params): (SelectQuery)
+ params(): Params
}
- interface SelectQuery {
+ interface Query {
/**
- * AndBind appends additional parameters to be bound to the query.
+ * Prepare creates a prepared statement for later queries or executions.
+ * Close() should be called after finishing all queries.
*/
- andBind(params: Params): (SelectQuery)
+ prepare(): (Query)
}
- interface SelectQuery {
+ interface Query {
/**
- * Build builds the SELECT query and returns an executable Query object.
+ * Close closes the underlying prepared statement.
+ * Close does nothing if the query has not been prepared before.
*/
- build(): (Query)
+ close(): void
}
- interface SelectQuery {
+ interface Query {
/**
- * One executes the SELECT query and populates the first row of the result into the specified variable.
- *
- * If the query does not specify a "from" clause, the method will try to infer the name of the table
- * to be selected from by calling getTableName() which will return either the variable type name
- * or the TableName() method if the variable implements the TableModel interface.
- *
- * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
+ * Bind sets the parameters that should be bound to the SQL statement.
+ * The parameter placeholders in the SQL statement are in the format of "{:ParamName}".
*/
- one(a: {
- }): void
+ bind(params: Params): (Query)
}
- interface SelectQuery {
+ interface Query {
/**
- * Model selects the row with the specified primary key and populates the model with the row data.
- *
- * The model variable should be a pointer to a struct. If the query does not specify a "from" clause,
- * it will use the model struct to determine which table to select data from. It will also use the model
- * to infer the name of the primary key column. Only simple primary key is supported. For composite primary keys,
- * please use Where() to specify the filtering condition.
+ * Execute executes the SQL statement without retrieving data.
*/
- model(pk: {
- }): void
+ execute(): sql.Result
}
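+ /**
+ * Illustrative sketch (not part of the generated declarations) of a raw
+ * named-parameter statement, assuming the $app.dao().db() builder exposed by
+ * the JSVM and its newQuery() method declared elsewhere in this file:
+ *
+ * ```
+ * $app.dao().db()
+ *     .newQuery("UPDATE articles SET status = {:status} WHERE id = {:id}")
+ *     .bind({ status: "active", id: "RECORD_ID" })
+ *     .execute()
+ * ```
+ */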
- interface SelectQuery {
+ interface Query {
/**
- * All executes the SELECT query and populates all rows of the result into a slice.
- *
- * Note that the slice must be passed in as a pointer.
- *
- * If the query does not specify a "from" clause, the method will try to infer the name of the table
- * to be selected from by calling getTableName() which will return either the type name of the slice elements
- * or the TableName() method if the slice element implements the TableModel interface.
+ * One executes the SQL statement and populates the first row of the result into a struct or NullStringMap.
+ * Refer to Rows.ScanStruct() and Rows.ScanMap() for more details on how to specify
+ * the variable to be populated.
+ * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
*/
- all(slice: {
+ one(a: {
}): void
}
- interface SelectQuery {
+ interface Query {
/**
- * Rows builds and executes the SELECT query and returns a Rows object for data retrieval purpose.
- * This is a shortcut to SelectQuery.Build().Rows()
+ * All executes the SQL statement and populates all the resulting rows into a slice of struct or NullStringMap.
+ * The slice must be given as a pointer. Each slice element must be either a struct or a NullStringMap.
+ * Refer to Rows.ScanStruct() and Rows.ScanMap() for more details on how each slice element can be.
+ * If the query returns no row, the slice will be an empty slice (not nil).
*/
- rows(): (Rows)
+ all(slice: {
+ }): void
}
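+ /**
+ * Illustrative sketch (not part of the generated declarations), assuming the
+ * arrayOf() and DynamicModel helpers exposed by the JSVM:
+ *
+ * ```
+ * const items = arrayOf(new DynamicModel({ id: "", title: "" }))
+ *
+ * $app.dao().db()
+ *     .newQuery("SELECT id, title FROM articles WHERE status = {:status}")
+ *     .bind({ status: "active" })
+ *     .all(items)
+ * ```
+ */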
- interface SelectQuery {
+ interface Query {
/**
- * Row builds and executes the SELECT query and populates the first row of the result into the specified variables.
- * This is a shortcut to SelectQuery.Build().Row()
+ * Row executes the SQL statement and populates the first row of the result into a list of variables.
+ * Note that the number of the variables should match that of the columns in the query result.
+ * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
*/
row(...a: {
}[]): void
}
- interface SelectQuery {
+ interface Query {
/**
- * Column builds and executes the SELECT statement and populates the first column of the result into a slice.
+ * Column executes the SQL statement and populates the first column of the result into a slice.
* Note that the parameter must be a pointer to a slice.
- * This is a shortcut to SelectQuery.Build().Column()
*/
column(a: {
}): void
}
- /**
- * QueryInfo represents a debug/info struct with exported SelectQuery fields.
- */
- interface QueryInfo {
- builder: Builder
- selects: Array
- distinct: boolean
- selectOption: string
- from: Array
- where: Expression
- join: Array
- orderBy: Array
- groupBy: Array
- having: Expression
- union: Array
- limit: number
- offset: number
- params: Params
- context: context.Context
- buildHook: BuildHookFunc
- }
- interface SelectQuery {
+ interface Query {
/**
- * Info exports common SelectQuery fields allowing to inspect the
- * current select query options.
+ * Rows executes the SQL statement and returns a Rows object to allow retrieving data row by row.
*/
- info(): (QueryInfo)
+ rows(): (Rows)
}
/**
- * FieldMapFunc converts a struct field name into a DB column name.
+ * QueryBuilder builds different clauses for a SELECT SQL statement.
*/
- interface FieldMapFunc {(_arg0: string): string }
+ interface QueryBuilder {
+ [key:string]: any;
+ /**
+ * BuildSelect generates a SELECT clause from the given selected column names.
+ */
+ buildSelect(cols: Array, distinct: boolean, option: string): string
+ /**
+ * BuildFrom generates a FROM clause from the given tables.
+ */
+ buildFrom(tables: Array): string
+ /**
+ * BuildGroupBy generates a GROUP BY clause from the given group-by columns.
+ */
+ buildGroupBy(cols: Array): string
+ /**
+ * BuildJoin generates a JOIN clause from the given join information.
+ */
+ buildJoin(_arg0: Array, _arg1: Params): string
+ /**
+ * BuildWhere generates a WHERE clause from the given expression.
+ */
+ buildWhere(_arg0: Expression, _arg1: Params): string
+ /**
+ * BuildHaving generates a HAVING clause from the given expression.
+ */
+ buildHaving(_arg0: Expression, _arg1: Params): string
+ /**
+ * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ */
+ buildOrderByAndLimit(_arg0: string, _arg1: Array, _arg2: number, _arg3: number): string
+ /**
+ * BuildUnion generates a UNION clause from the given union information.
+ */
+ buildUnion(_arg0: Array, _arg1: Params): string
+ }
/**
- * TableMapFunc converts a sample struct into a DB table name.
+ * BaseQueryBuilder provides a basic implementation of QueryBuilder.
*/
- interface TableMapFunc {(a: {
- }): string }
- interface structInfo {
+ interface BaseQueryBuilder {
}
- type _subtaShB = structInfo
- interface structValue extends _subtaShB {
+ interface newBaseQueryBuilder {
+ /**
+ * NewBaseQueryBuilder creates a new BaseQueryBuilder instance.
+ */
+ (db: DB): (BaseQueryBuilder)
}
- interface fieldInfo {
+ interface BaseQueryBuilder {
+ /**
+ * DB returns the DB instance associated with the query builder.
+ */
+ db(): (DB)
}
- interface structInfoMapKey {
+ interface BaseQueryBuilder {
+ /**
+ * BuildSelect generates a SELECT clause from the given selected column names.
+ */
+ buildSelect(cols: Array, distinct: boolean, option: string): string
}
- /**
- * PostScanner is an optional interface used by ScanStruct.
- */
- interface PostScanner {
- [key:string]: any;
+ interface BaseQueryBuilder {
/**
- * PostScan executes right after the struct has been populated
- * with the DB values, allowing you to further normalize or validate
- * the loaded data.
+ * BuildFrom generates a FROM clause from the given tables.
*/
- postScan(): void
+ buildFrom(tables: Array): string
}
- interface defaultFieldMapFunc {
+ interface BaseQueryBuilder {
/**
- * DefaultFieldMapFunc maps a field name to a DB column name.
- * The mapping rule set by this method is that words in a field name will be separated by underscores
- * and the name will be turned into lower case. For example, "FirstName" maps to "first_name", and "MyID" becomes "my_id".
- * See DB.FieldMapper for more details.
+ * BuildJoin generates a JOIN clause from the given join information.
*/
- (f: string): string
+ buildJoin(joins: Array, params: Params): string
}
- interface getTableName {
+ interface BaseQueryBuilder {
/**
- * GetTableName implements the default way of determining the table name corresponding to the given model struct
- * or slice of structs. To get the actual table name for a model, you should use DB.TableMapFunc() instead.
- * Do not call this method in a model's TableName() method because it will cause infinite loop.
+ * BuildWhere generates a WHERE clause from the given expression.
*/
- (a: {
- }): string
+ buildWhere(e: Expression, params: Params): string
}
- /**
- * Tx enhances sql.Tx with additional querying methods.
- */
- type _subgTasg = Builder
- interface Tx extends _subgTasg {
+ interface BaseQueryBuilder {
+ /**
+ * BuildHaving generates a HAVING clause from the given expression.
+ */
+ buildHaving(e: Expression, params: Params): string
}
- interface Tx {
+ interface BaseQueryBuilder {
/**
- * Commit commits the transaction.
+ * BuildGroupBy generates a GROUP BY clause from the given group-by columns.
*/
- commit(): void
+ buildGroupBy(cols: Array): string
}
- interface Tx {
+ interface BaseQueryBuilder {
/**
- * Rollback aborts the transaction.
+ * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
*/
- rollback(): void
+ buildOrderByAndLimit(sql: string, cols: Array, limit: number, offset: number): string
}
-}
-
-/**
- * Package exec runs external commands. It wraps os.StartProcess to make it
- * easier to remap stdin and stdout, connect I/O with pipes, and do other
- * adjustments.
- *
- * Unlike the "system" library call from C and other languages, the
- * os/exec package intentionally does not invoke the system shell and
- * does not expand any glob patterns or handle other expansions,
- * pipelines, or redirections typically done by shells. The package
- * behaves more like C's "exec" family of functions. To expand glob
- * patterns, either call the shell directly, taking care to escape any
- * dangerous input, or use the path/filepath package's Glob function.
- * To expand environment variables, use package os's ExpandEnv.
- *
- * Note that the examples in this package assume a Unix system.
- * They may not run on Windows, and they do not run in the Go Playground
- * used by golang.org and godoc.org.
- *
- * # Executables in the current directory
- *
- * The functions Command and LookPath look for a program
- * in the directories listed in the current path, following the
- * conventions of the host operating system.
- * Operating systems have for decades included the current
- * directory in this search, sometimes implicitly and sometimes
- * configured explicitly that way by default.
- * Modern practice is that including the current directory
- * is usually unexpected and often leads to security problems.
- *
- * To avoid those security problems, as of Go 1.19, this package will not resolve a program
- * using an implicit or explicit path entry relative to the current directory.
- * That is, if you run exec.LookPath("go"), it will not successfully return
- * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured.
- * Instead, if the usual path algorithms would result in that answer,
- * these functions return an error err satisfying errors.Is(err, ErrDot).
- *
- * For example, consider these two program snippets:
- *
- * ```
- * path, err := exec.LookPath("prog")
- * if err != nil {
- * log.Fatal(err)
- * }
- * use(path)
- * ```
- *
- * and
- *
- * ```
- * cmd := exec.Command("prog")
- * if err := cmd.Run(); err != nil {
- * log.Fatal(err)
- * }
- * ```
- *
- * These will not find and run ./prog or .\prog.exe,
- * no matter how the current path is configured.
- *
- * Code that always wants to run a program from the current directory
- * can be rewritten to say "./prog" instead of "prog".
- *
- * Code that insists on including results from relative path entries
- * can instead override the error using an errors.Is check:
- *
- * ```
- * path, err := exec.LookPath("prog")
- * if errors.Is(err, exec.ErrDot) {
- * err = nil
- * }
- * if err != nil {
- * log.Fatal(err)
- * }
- * use(path)
- * ```
- *
- * and
- *
- * ```
- * cmd := exec.Command("prog")
- * if errors.Is(cmd.Err, exec.ErrDot) {
- * cmd.Err = nil
- * }
- * if err := cmd.Run(); err != nil {
- * log.Fatal(err)
- * }
- * ```
- *
- * Setting the environment variable GODEBUG=execerrdot=0
- * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19
- * behavior for programs that are unable to apply more targeted fixes.
- * A future version of Go may remove support for this variable.
- *
- * Before adding such overrides, make sure you understand the
- * security implications of doing so.
- * See https://go.dev/blog/path-security for more information.
- */
-namespace exec {
- interface command {
+ interface BaseQueryBuilder {
/**
- * Command returns the Cmd struct to execute the named program with
- * the given arguments.
- *
- * It sets only the Path and Args in the returned structure.
- *
- * If name contains no path separators, Command uses LookPath to
- * resolve name to a complete path if possible. Otherwise it uses name
- * directly as Path.
- *
- * The returned Cmd's Args field is constructed from the command name
- * followed by the elements of arg, so arg should not include the
- * command name itself. For example, Command("echo", "hello").
- * Args[0] is always name, not the possibly resolved Path.
- *
- * On Windows, processes receive the whole command line as a single string
- * and do their own parsing. Command combines and quotes Args into a command
- * line string with an algorithm compatible with applications using
- * CommandLineToArgvW (which is the most common way). Notable exceptions are
- * msiexec.exe and cmd.exe (and thus, all batch files), which have a different
- * unquoting algorithm. In these or other similar cases, you can do the
- * quoting yourself and provide the full command line in SysProcAttr.CmdLine,
- * leaving Args empty.
+ * BuildUnion generates a UNION clause from the given union information.
*/
- (name: string, ...arg: string[]): (Cmd)
- }
-}
-
-namespace filesystem {
- /**
- * FileReader defines an interface for a file resource reader.
- */
- interface FileReader {
- [key:string]: any;
- open(): io.ReadSeekCloser
+ buildUnion(unions: Array, params: Params): string
}
- /**
- * File defines a single file [io.ReadSeekCloser] resource.
- *
- * The file could be from a local path, multipipart/formdata header, etc.
- */
- interface File {
- reader: FileReader
- name: string
- originalName: string
- size: number
+ interface BaseQueryBuilder {
+ /**
+ * BuildOrderBy generates the ORDER BY clause.
+ */
+ buildOrderBy(cols: Array): string
}
- interface newFileFromPath {
+ interface BaseQueryBuilder {
/**
- * NewFileFromPath creates a new File instance from the provided local file path.
+ * BuildLimit generates the LIMIT clause.
*/
- (path: string): (File)
+ buildLimit(limit: number, offset: number): string
}
- interface newFileFromBytes {
+ /**
+ * VarTypeError indicates a variable type error when trying to populating a variable with DB result.
+ */
+ interface VarTypeError extends String{}
+ interface VarTypeError {
/**
- * NewFileFromBytes creates a new File instance from the provided byte slice.
+ * Error returns the error message.
*/
- (b: string|Array, name: string): (File)
+ error(): string
}
- interface newFileFromMultipart {
+ /**
+ * NullStringMap is a map of sql.NullString that can be used to hold DB query result.
+ * The map keys correspond to the DB column names, while the map values are their corresponding column values.
+ */
+ interface NullStringMap extends _TygojaDict{}
+ /**
+ * Rows enhances sql.Rows by providing additional data query methods.
+ * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row.
+ */
+ type _subrMzGi = sql.Rows
+ interface Rows extends _subrMzGi {
+ }
+ interface Rows {
/**
- * NewFileFromMultipart creates a new File from the provided multipart header.
+ * ScanMap populates the current row of data into a NullStringMap.
+ * Note that the NullStringMap must not be nil, or it will panic.
+ * The NullStringMap will be populated using column names as keys and their values as
+ * the corresponding element values.
*/
- (mh: multipart.FileHeader): (File)
+ scanMap(a: NullStringMap): void
}
- interface newFileFromUrl {
+ interface Rows {
/**
- * NewFileFromUrl creates a new File from the provided url by
- * downloading the resource and load it as BytesReader.
- *
- * Example
+ * ScanStruct populates the current row of data into a struct.
+ * The struct must be given as a pointer.
*
- * ```
- * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- * defer cancel()
+ * ScanStruct associates struct fields with DB table columns through a field mapping function.
+ * It populates a struct field with the data of its associated column.
+ * Note that only exported struct fields will be populated.
*
- * file, err := filesystem.NewFileFromUrl(ctx, "https://example.com/image.png")
- * ```
+ * By default, DefaultFieldMapFunc() is used to map struct fields to table columns.
+ * This function separates each word in a field name with an underscore and turns every letter into lower case.
+ * For example, "LastName" is mapped to "last_name", "MyID" is mapped to "my_id", and so on.
+ * To change the default behavior, set DB.FieldMapper with your custom mapping function.
+ * You may also set Query.FieldMapper to change the behavior for particular queries.
*/
- (ctx: context.Context, url: string): (File)
+ scanStruct(a: {
+ }): void
}
/**
- * MultipartReader defines a FileReader from [multipart.FileHeader].
+ * BuildHookFunc defines a callback function that is executed on Query creation.
*/
- interface MultipartReader {
- header?: multipart.FileHeader
- }
- interface MultipartReader {
+ interface BuildHookFunc {(q: Query): void }
+ /**
+ * SelectQuery represents a DB-agnostic SELECT query.
+ * It can be built into a DB-specific query by calling the Build() method.
+ */
+ interface SelectQuery {
/**
- * Open implements the [filesystem.FileReader] interface.
+ * FieldMapper maps struct field names to DB column names.
*/
- open(): io.ReadSeekCloser
+ fieldMapper: FieldMapFunc
+ /**
+ * TableMapper maps structs to DB table names.
+ */
+ tableMapper: TableMapFunc
}
/**
- * PathReader defines a FileReader from a local file path.
+ * JoinInfo contains the specification for a JOIN clause.
*/
- interface PathReader {
- path: string
- }
- interface PathReader {
- /**
- * Open implements the [filesystem.FileReader] interface.
- */
- open(): io.ReadSeekCloser
+ interface JoinInfo {
+ join: string
+ table: string
+ on: Expression
}
/**
- * BytesReader defines a FileReader from bytes content.
+ * UnionInfo contains the specification for a UNION clause.
*/
- interface BytesReader {
- bytes: string|Array
+ interface UnionInfo {
+ all: boolean
+ query?: Query
}
- interface BytesReader {
+ interface newSelectQuery {
/**
- * Open implements the [filesystem.FileReader] interface.
+ * NewSelectQuery creates a new SelectQuery instance.
*/
- open(): io.ReadSeekCloser
- }
- type _subHVtTs = bytes.Reader
- interface bytesReadSeekCloser extends _subHVtTs {
+ (builder: Builder, db: DB): (SelectQuery)
}
- interface bytesReadSeekCloser {
+ interface SelectQuery {
/**
- * Close implements the [io.ReadSeekCloser] interface.
+ * WithBuildHook runs the provided hook function with the query created on Build().
*/
- close(): void
- }
- interface System {
+ withBuildHook(fn: BuildHookFunc): (SelectQuery)
}
- interface newS3 {
+ interface SelectQuery {
/**
- * NewS3 initializes an S3 filesystem instance.
- *
- * NB! Make sure to call `Close()` after you are done working with it.
+ * Context returns the context associated with the query.
*/
- (bucketName: string, region: string, endpoint: string, accessKey: string, secretKey: string, s3ForcePathStyle: boolean): (System)
+ context(): context.Context
}
- interface newLocal {
+ interface SelectQuery {
/**
- * NewLocal initializes a new local filesystem instance.
- *
- * NB! Make sure to call `Close()` after you are done working with it.
+ * WithContext associates a context with the query.
*/
- (dirPath: string): (System)
+ withContext(ctx: context.Context): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * SetContext assigns the specified context to the current filesystem.
+ * Select specifies the columns to be selected.
+ * Column names will be automatically quoted.
*/
- setContext(ctx: context.Context): void
+ select(...cols: string[]): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * Close releases any resources used for the related filesystem.
+ * AndSelect adds additional columns to be selected.
+ * Column names will be automatically quoted.
*/
- close(): void
+ andSelect(...cols: string[]): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * Exists checks if file with fileKey path exists or not.
+ * Distinct specifies whether to select columns distinctively.
+ * By default, distinct is false.
*/
- exists(fileKey: string): boolean
+ distinct(v: boolean): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * Attributes returns the attributes for the file with fileKey path.
+ * SelectOption specifies an additional option that should be appended to "SELECT".
*/
- attributes(fileKey: string): (blob.Attributes)
+ selectOption(option: string): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * GetFile returns a file content reader for the given fileKey.
- *
- * NB! Make sure to call `Close()` after you are done working with it.
+ * From specifies which tables to select from.
+ * Table names will be automatically quoted.
*/
- getFile(fileKey: string): (blob.Reader)
+ from(...tables: string[]): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * Copy copies the file stored at srcKey to dstKey.
- *
- * If dstKey file already exists, it is overwritten.
+ * Where specifies the WHERE condition.
*/
- copy(srcKey: string): void
+ where(e: Expression): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * List returns a flat list with info for all files under the specified prefix.
+ * AndWhere concatenates a new WHERE condition with the existing one (if any) using "AND".
*/
- list(prefix: string): Array<(blob.ListObject | undefined)>
+ andWhere(e: Expression): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * Upload writes content into the fileKey location.
+ * OrWhere concatenates a new WHERE condition with the existing one (if any) using "OR".
*/
- upload(content: string|Array, fileKey: string): void
+ orWhere(e: Expression): (SelectQuery)
}
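+ /**
+ * Illustrative sketch (not part of the generated declarations) of the
+ * query-builder style SELECT, assuming the $app.dao().db() builder, the $dbx
+ * expression helpers and the arrayOf()/DynamicModel helpers exposed by the JSVM:
+ *
+ * ```
+ * const users = arrayOf(new DynamicModel({ id: "", email: "" }))
+ *
+ * $app.dao().db()
+ *     .select("id", "email")
+ *     .from("users")
+ *     .where($dbx.hashExp({ verified: true }))
+ *     .andWhere($dbx.like("email", "@example.com"))
+ *     .all(users)
+ * ```
+ */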
- interface System {
+ interface SelectQuery {
/**
- * UploadFile uploads the provided multipart file to the fileKey location.
+ * Join specifies a JOIN clause.
+ * The "typ" parameter specifies the JOIN type (e.g. "INNER JOIN", "LEFT JOIN").
*/
- uploadFile(file: File, fileKey: string): void
+ join(typ: string, table: string, on: Expression): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * UploadMultipart uploads the provided multipart file to the fileKey location.
+ * InnerJoin specifies an INNER JOIN clause.
+ * This is a shortcut method for Join.
*/
- uploadMultipart(fh: multipart.FileHeader, fileKey: string): void
+ innerJoin(table: string, on: Expression): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * Delete deletes stored file at fileKey location.
+ * LeftJoin specifies a LEFT JOIN clause.
+ * This is a shortcut method for Join.
*/
- delete(fileKey: string): void
+ leftJoin(table: string, on: Expression): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * DeletePrefix deletes everything starting with the specified prefix.
+ * RightJoin specifies a RIGHT JOIN clause.
+ * This is a shortcut method for Join.
*/
- deletePrefix(prefix: string): Array
+ rightJoin(table: string, on: Expression): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * Serve serves the file at fileKey location to an HTTP response.
- *
- * If the `download` query parameter is used the file will be always served for
- * download no matter of its type (aka. with "Content-Disposition: attachment").
+ * OrderBy specifies the ORDER BY clause.
+ * Column names will be properly quoted. A column name can contain "ASC" or "DESC" to indicate its ordering direction.
*/
- serve(res: http.ResponseWriter, req: http.Request, fileKey: string, name: string): void
+ orderBy(...cols: string[]): (SelectQuery)
}
- interface System {
+ interface SelectQuery {
/**
- * CreateThumb creates a new thumb image for the file at originalKey location.
- * The new thumb file is stored at thumbKey location.
- *
- * thumbSize is in the format:
- * - 0xH (eg. 0x100) - resize to H height preserving the aspect ratio
- * - Wx0 (eg. 300x0) - resize to W width preserving the aspect ratio
- * - WxH (eg. 300x100) - resize and crop to WxH viewbox (from center)
- * - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top)
- * - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom)
- * - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping)
+ * AndOrderBy appends additional columns to the existing ORDER BY clause.
+ * Column names will be properly quoted. A column name can contain "ASC" or "DESC" to indicate its ordering direction.
*/
- createThumb(originalKey: string, thumbKey: string): void
+ andOrderBy(...cols: string[]): (SelectQuery)
}
-}
-
-/**
- * Package tokens implements various user and admin tokens generation methods.
- */
-namespace tokens {
- interface newAdminAuthToken {
+ interface SelectQuery {
/**
- * NewAdminAuthToken generates and returns a new admin authentication token.
+ * GroupBy specifies the GROUP BY clause.
+ * Column names will be properly quoted.
*/
- (app: CoreApp, admin: models.Admin): string
+ groupBy(...cols: string[]): (SelectQuery)
}
- interface newAdminResetPasswordToken {
+ interface SelectQuery {
/**
- * NewAdminResetPasswordToken generates and returns a new admin password reset request token.
+ * AndGroupBy appends additional columns to the existing GROUP BY clause.
+ * Column names will be properly quoted.
*/
- (app: CoreApp, admin: models.Admin): string
+ andGroupBy(...cols: string[]): (SelectQuery)
}
- interface newAdminFileToken {
+ interface SelectQuery {
/**
- * NewAdminFileToken generates and returns a new admin private file access token.
+ * Having specifies the HAVING clause.
*/
- (app: CoreApp, admin: models.Admin): string
+ having(e: Expression): (SelectQuery)
}
- interface newRecordAuthToken {
+ interface SelectQuery {
/**
- * NewRecordAuthToken generates and returns a new auth record authentication token.
+ * AndHaving concatenates a new HAVING condition with the existing one (if any) using "AND".
*/
- (app: CoreApp, record: models.Record): string
+ andHaving(e: Expression): (SelectQuery)
}
- interface newRecordVerifyToken {
+ interface SelectQuery {
/**
- * NewRecordVerifyToken generates and returns a new record verification token.
+ * OrHaving concatenates a new HAVING condition with the existing one (if any) using "OR".
*/
- (app: CoreApp, record: models.Record): string
+ orHaving(e: Expression): (SelectQuery)
}
- interface newRecordResetPasswordToken {
+ interface SelectQuery {
/**
- * NewRecordResetPasswordToken generates and returns a new auth record password reset request token.
+ * Union specifies a UNION clause.
*/
- (app: CoreApp, record: models.Record): string
+ union(q: Query): (SelectQuery)
}
- interface newRecordChangeEmailToken {
+ interface SelectQuery {
/**
- * NewRecordChangeEmailToken generates and returns a new auth record change email request token.
+ * UnionAll specifies a UNION ALL clause.
*/
- (app: CoreApp, record: models.Record, newEmail: string): string
+ unionAll(q: Query): (SelectQuery)
}
- interface newRecordFileToken {
+ interface SelectQuery {
/**
- * NewRecordFileToken generates and returns a new record private file access token.
+ * Limit specifies the LIMIT clause.
+ * A negative limit means no limit.
*/
- (app: CoreApp, record: models.Record): string
+ limit(limit: number): (SelectQuery)
}
-}
-
-/**
- * Package mails implements various helper methods for sending user and admin
- * emails like forgotten password, verification, etc.
- */
-namespace mails {
- interface sendAdminPasswordReset {
+ interface SelectQuery {
/**
- * SendAdminPasswordReset sends a password reset request email to the specified admin.
+ * Offset specifies the OFFSET clause.
+ * A negative offset means no offset.
*/
- (app: CoreApp, admin: models.Admin): void
+ offset(offset: number): (SelectQuery)
}
- interface sendRecordPasswordReset {
+ interface SelectQuery {
/**
- * SendRecordPasswordReset sends a password reset request email to the specified user.
+ * Bind specifies the parameter values to be bound to the query.
*/
- (app: CoreApp, authRecord: models.Record): void
+ bind(params: Params): (SelectQuery)
}
- interface sendRecordVerification {
+ interface SelectQuery {
/**
- * SendRecordVerification sends a verification request email to the specified user.
+ * AndBind appends additional parameters to be bound to the query.
*/
- (app: CoreApp, authRecord: models.Record): void
+ andBind(params: Params): (SelectQuery)
}
- interface sendRecordChangeEmail {
+ interface SelectQuery {
/**
- * SendUserChangeEmail sends a change email confirmation email to the specified user.
+ * Build builds the SELECT query and returns an executable Query object.
*/
- (app: CoreApp, record: models.Record, newEmail: string): void
- }
-}
-
-/**
- * Package models implements various services used for request data
- * validation and applying changes to existing DB models through the app Dao.
- */
-namespace forms {
- // @ts-ignore
- import validation = ozzo_validation
- /**
- * AdminLogin is an admin email/pass login form.
- */
- interface AdminLogin {
- identity: string
- password: string
+ build(): (Query)
}
- interface newAdminLogin {
+ interface SelectQuery {
/**
- * NewAdminLogin creates a new [AdminLogin] form initialized with
- * the provided [CoreApp] instance.
+ * One executes the SELECT query and populates the first row of the result into the specified variable.
*
- * If you want to submit the form as part of a transaction,
- * you can change the default Dao via [SetDao()].
+ * If the query does not specify a "from" clause, the method will try to infer the name of the table
+ * to be selected from by calling getTableName() which will return either the variable type name
+ * or the TableName() method if the variable implements the TableModel interface.
+ *
+ * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
*/
- (app: CoreApp): (AdminLogin)
+ one(a: {
+ }): void
}
- interface AdminLogin {
+ interface SelectQuery {
/**
- * SetDao replaces the default form Dao instance with the provided one.
+ * Model selects the row with the specified primary key and populates the model with the row data.
+ *
+ * The model variable should be a pointer to a struct. If the query does not specify a "from" clause,
+ * it will use the model struct to determine which table to select data from. It will also use the model
+ * to infer the name of the primary key column. Only a simple primary key is supported. For composite primary keys,
+ * please use Where() to specify the filtering condition.
*/
- setDao(dao: daos.Dao): void
+ model(pk: {
+ }, model: {
+ }): void
+ }
+ interface SelectQuery {
+ /**
+ * All executes the SELECT query and populates all rows of the result into a slice.
+ *
+ * Note that the slice must be passed in as a pointer.
+ *
+ * If the query does not specify a "from" clause, the method will try to infer the name of the table
+ * to be selected from by calling getTableName() which will return either the type name of the slice elements
+ * or the TableName() method if the slice element implements the TableModel interface.
+ */
+ all(slice: {
+ }): void
+ }
+ interface SelectQuery {
+ /**
+ * Rows builds and executes the SELECT query and returns a Rows object for data retrieval purpose.
+ * This is a shortcut to SelectQuery.Build().Rows()
+ */
+ rows(): (Rows)
+ }
+ interface SelectQuery {
+ /**
+ * Row builds and executes the SELECT query and populates the first row of the result into the specified variables.
+ * This is a shortcut to SelectQuery.Build().Row()
+ */
+ row(...a: {
+ }[]): void
+ }
+ interface SelectQuery {
+ /**
+ * Column builds and executes the SELECT statement and populates the first column of the result into a slice.
+ * Note that the parameter must be a pointer to a slice.
+ * This is a shortcut to SelectQuery.Build().Column()
+ */
+ column(a: {
+ }): void
+ }
+ /**
+ * QueryInfo represents a debug/info struct with exported SelectQuery fields.
+ */
+ interface QueryInfo {
+ builder: Builder
+ selects: Array
+ distinct: boolean
+ selectOption: string
+ from: Array
+ where: Expression
+ join: Array
+ orderBy: Array
+ groupBy: Array
+ having: Expression
+ union: Array
+ limit: number
+ offset: number
+ params: Params
+ context: context.Context
+ buildHook: BuildHookFunc
+ }
+ interface SelectQuery {
+ /**
+ * Info exports common SelectQuery fields, allowing you to inspect the
+ * current select query options.
+ */
+ info(): (QueryInfo)
+ }
+ /**
+ * FieldMapFunc converts a struct field name into a DB column name.
+ */
+ interface FieldMapFunc {(_arg0: string): string }
+ /**
+ * TableMapFunc converts a sample struct into a DB table name.
+ */
+ interface TableMapFunc {(a: {
+ }): string }
+ interface structInfo {
+ }
+ type _subSCEkW = structInfo
+ interface structValue extends _subSCEkW {
+ }
+ interface fieldInfo {
+ }
+ interface structInfoMapKey {
+ }
+ /**
+ * PostScanner is an optional interface used by ScanStruct.
+ */
+ interface PostScanner {
+ [key:string]: any;
+ /**
+ * PostScan executes right after the struct has been populated
+ * with the DB values, allowing you to further normalize or validate
+ * the loaded data.
+ */
+ postScan(): void
+ }
+ interface defaultFieldMapFunc {
+ /**
+ * DefaultFieldMapFunc maps a field name to a DB column name.
+ * The mapping rule set by this method is that words in a field name will be separated by underscores
+ * and the name will be turned into lower case. For example, "FirstName" maps to "first_name", and "MyID" becomes "my_id".
+ * See DB.FieldMapper for more details.
+ */
+ (f: string): string
+ }
+ interface getTableName {
+ /**
+ * GetTableName implements the default way of determining the table name corresponding to the given model struct
+ * or slice of structs. To get the actual table name for a model, you should use DB.TableMapFunc() instead.
+ * Do not call this method in a model's TableName() method because it will cause an infinite loop.
+ */
+ (a: {
+ }): string
+ }
+ /**
+ * Tx enhances sql.Tx with additional querying methods.
+ */
+ type _subXvIkD = Builder
+ interface Tx extends _subXvIkD {
+ }
+ interface Tx {
+ /**
+ * Commit commits the transaction.
+ */
+ commit(): void
+ }
+ interface Tx {
+ /**
+ * Rollback aborts the transaction.
+ */
+ rollback(): void
+ }
+}
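
The `dbx` declarations above mirror the Go `github.com/pocketbase/dbx` query builder that the JSVM exposes. A minimal sketch of how the `SelectQuery` methods compose inside a JS hook, assuming the usual `$app`, `$dbx`, `DynamicModel` and `arrayOf` globals registered by the jsvm plugin (the table and column names below are made up):

```js
// verified users with their post counts, highest first;
// all() builds and executes the query and fills the prepared slice
const result = arrayOf(new DynamicModel({
    id:    "",
    email: "",
    total: 0,
}));

$app.dao().db()
    .select("u.id", "u.email", "COUNT(p.id) AS total")
    .from("users u")
    .leftJoin("posts p", $dbx.exp("p.user_id = u.id"))
    .where($dbx.hashExp({ "u.verified": true }))
    .groupBy("u.id")
    .orderBy("total DESC")
    .limit(10)
    .all(result);
```

`rows()`, `row()` and `column()` are just shortcuts to `build()` plus the corresponding `Query` method, so an explicit `build()` is only needed when the prepared `Query` itself should be reused or inspected.
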
+
+/**
+ * Package tokens implements various user and admin tokens generation methods.
+ */
+namespace tokens {
+ interface newAdminAuthToken {
+ /**
+ * NewAdminAuthToken generates and returns a new admin authentication token.
+ */
+ (app: CoreApp, admin: models.Admin): string
+ }
+ interface newAdminResetPasswordToken {
+ /**
+ * NewAdminResetPasswordToken generates and returns a new admin password reset request token.
+ */
+ (app: CoreApp, admin: models.Admin): string
+ }
+ interface newAdminFileToken {
+ /**
+ * NewAdminFileToken generates and returns a new admin private file access token.
+ */
+ (app: CoreApp, admin: models.Admin): string
+ }
+ interface newRecordAuthToken {
+ /**
+ * NewRecordAuthToken generates and returns a new auth record authentication token.
+ */
+ (app: CoreApp, record: models.Record): string
+ }
+ interface newRecordVerifyToken {
+ /**
+ * NewRecordVerifyToken generates and returns a new record verification token.
+ */
+ (app: CoreApp, record: models.Record): string
+ }
+ interface newRecordResetPasswordToken {
+ /**
+ * NewRecordResetPasswordToken generates and returns a new auth record password reset request token.
+ */
+ (app: CoreApp, record: models.Record): string
+ }
+ interface newRecordChangeEmailToken {
+ /**
+ * NewRecordChangeEmailToken generates and returns a new auth record change email request token.
+ */
+ (app: CoreApp, record: models.Record, newEmail: string): string
+ }
+ interface newRecordFileToken {
+ /**
+ * NewRecordFileToken generates and returns a new record private file access token.
+ */
+ (app: CoreApp, record: models.Record): string
+ }
+}
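
The same helpers are reachable from JS (recent releases expose them through a `$tokens` global). A hedged sketch of returning a short-lived private file token for the currently authenticated record from a custom route; the route path and the exact `$apis`/`$tokens` binding names are assumptions for illustration:

```js
routerAdd("GET", "/api/myapp/file-token", (c) => {
    const record = $apis.requestInfo(c).authRecord;
    if (!record) {
        throw new UnauthorizedError("Missing auth record context.");
    }

    // JS counterpart of tokens.NewRecordFileToken(app, record)
    return c.json(200, { token: $tokens.recordFileToken($app, record) });
}, $apis.requireRecordAuth());
```
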
+
+/**
+ * Package mails implements various helper methods for sending user and admin
+ * emails like forgotten password, verification, etc.
+ */
+namespace mails {
+ interface sendAdminPasswordReset {
+ /**
+ * SendAdminPasswordReset sends a password reset request email to the specified admin.
+ */
+ (app: CoreApp, admin: models.Admin): void
+ }
+ interface sendRecordPasswordReset {
+ /**
+ * SendRecordPasswordReset sends a password reset request email to the specified user.
+ */
+ (app: CoreApp, authRecord: models.Record): void
+ }
+ interface sendRecordVerification {
+ /**
+ * SendRecordVerification sends a verification request email to the specified user.
+ */
+ (app: CoreApp, authRecord: models.Record): void
+ }
+ interface sendRecordChangeEmail {
+ /**
+ * SendRecordChangeEmail sends a change email confirmation email to the specified user.
+ */
+ (app: CoreApp, record: models.Record, newEmail: string): void
+ }
+}
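
Likewise for the mail helpers. A hedged sketch of re-sending the verification email right after a new auth record is created, assuming the `$mails` binding and the `onRecordAfterCreateRequest` hook (the "users" collection name is illustrative):

```js
onRecordAfterCreateRequest((e) => {
    // skip already verified records and don't fail the request on SMTP errors
    if (!e.record.verified()) {
        try {
            $mails.sendRecordVerification($app, e.record);
        } catch (err) {
            $app.logger().warn("Failed to send verification email", "error", err);
        }
    }
}, "users");
```
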
+
+/**
+ * Package forms implements various services used for request data
+ * validation and applying changes to existing DB models through the app Dao.
+ */
+namespace forms {
+ // @ts-ignore
+ import validation = ozzo_validation
+ /**
+ * AdminLogin is an admin email/pass login form.
+ */
+ interface AdminLogin {
+ identity: string
+ password: string
+ }
+ interface newAdminLogin {
+ /**
+ * NewAdminLogin creates a new [AdminLogin] form initialized with
+ * the provided [CoreApp] instance.
+ *
+ * If you want to submit the form as part of a transaction,
+ * you can change the default Dao via [SetDao()].
+ */
+ (app: CoreApp): (AdminLogin)
+ }
+ interface AdminLogin {
+ /**
+ * SetDao replaces the default form Dao instance with the provided one.
+ */
+ setDao(dao: daos.Dao): void
}
interface AdminLogin {
/**
@@ -5971,7 +6156,7 @@ namespace forms {
/**
* Validate makes the form validatable by implementing [validation.Validatable] interface.
*
- * This method doesn't checks whether auth record with `form.Email` exists (this is done on Submit).
+ * This method doesn't check whether auth record with `form.Email` exists (this is done on Submit).
*/
validate(): void
}
@@ -6089,10 +6274,10 @@ namespace forms {
*
* ```
 * // mark only 2 files for removal
- * form.AddFiles("documents", "file1_aw4bdrvws6.txt", "file2_xwbs36bafv.txt")
+ * form.RemoveFiles("documents", "file1_aw4bdrvws6.txt", "file2_xwbs36bafv.txt")
*
* // mark all "documents" files for removal
- * form.AddFiles("documents")
+ * form.RemoveFiles("documents")
* ```
*/
removeFiles(key: string, ...toDelete: string[]): void
@@ -6211,8 +6396,8 @@ namespace forms {
/**
* SettingsUpsert is a [settings.Settings] upsert (create/update) form.
*/
- type _subxSkzb = settings.Settings
- interface SettingsUpsert extends _subxSkzb {
+ type _subxZPsK = settings.Settings
+ interface SettingsUpsert extends _subxZPsK {
}
interface newSettingsUpsert {
/**
@@ -6387,8 +6572,8 @@ namespace apis {
interface healthApi {
}
interface healthCheckResponse {
- code: number
message: string
+ code: number
data: {
canBackup: boolean
}
@@ -6534,6 +6719,11 @@ namespace apis {
codeChallenge: string
codeChallengeMethod: string
}
+ interface oauth2EventMessage {
+ state: string
+ code: string
+ error: string
+ }
interface recordApi {
}
interface requestData {
@@ -6562,7 +6752,7 @@ namespace apis {
* ```
* - expands relations (if defaultExpands and/or ?expand query param is set)
* - ensures that the emails of the auth record and its expanded auth relations
- * are visibe only for the current logged admin, record owner or record with manage access
+ * are visible only for the current logged admin, record owner or record with manage access
* ```
*/
(c: echo.Context, dao: daos.Dao, record: models.Record, ...defaultExpands: string[]): void
@@ -6573,7 +6763,7 @@ namespace apis {
* ```
* - expands relations (if defaultExpands and/or ?expand query param is set)
* - ensures that the emails of the auth records and their expanded auth relations
- * are visibe only for the current logged admin, record owner or record with manage access
+ * are visible only for the current logged admin, record owner or record with manage access
* ```
*/
(c: echo.Context, dao: daos.Dao, records: Array<(models.Record | undefined)>, ...defaultExpands: string[]): void
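
These two enrich helpers are what keep custom routes consistent with the built-in record endpoints, i.e. they apply `?expand=` and hide emails from everyone but admins, record owners and records with manage access. A hedged sketch, with the route, collection and relation names made up for illustration:

```js
routerAdd("GET", "/api/myapp/latest-article", (c) => {
    const record = $app.dao().findFirstRecordByFilter("articles", "published = true");

    // applies ?expand=... and the email visibility rules described above
    $apis.enrichRecord(c, $app.dao(), record, "author");

    return c.json(200, record);
});
```
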
@@ -6638,8 +6828,8 @@ namespace pocketbase {
/**
* appWrapper serves as a private CoreApp instance wrapper.
*/
- type _subDghba = CoreApp
- interface appWrapper extends _subDghba {
+ type _subXbotK = CoreApp
+ interface appWrapper extends _subXbotK {
}
/**
* PocketBase defines a PocketBase app launcher.
@@ -6647,8 +6837,8 @@ namespace pocketbase {
* It implements [CoreApp] via embedding and all of the app interface methods
* could be accessed directly through the instance (eg. PocketBase.DataDir()).
*/
- type _subQSfhg = appWrapper
- interface PocketBase extends _subQSfhg {
+ type _subYRHBu = appWrapper
+ interface PocketBase extends _subYRHBu {
/**
* RootCmd is the main console command
*/
@@ -6731,111 +6921,6 @@ namespace pocketbase {
}
}
-/**
- * Package template is a thin wrapper around the standard html/template
- * and text/template packages that implements a convenient registry to
- * load and cache templates on the fly concurrently.
- *
- * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code.
- *
- * Example:
- *
- * ```
- * registry := template.NewRegistry()
- *
- * html1, err := registry.LoadFiles(
- * // the files set wil be parsed only once and then cached
- * "layout.html",
- * "content.html",
- * ).Render(map[string]any{"name": "John"})
- *
- * html2, err := registry.LoadFiles(
- * // reuse the already parsed and cached files set
- * "layout.html",
- * "content.html",
- * ).Render(map[string]any{"name": "Jane"})
- * ```
- */
-namespace template {
- interface newRegistry {
- /**
- * NewRegistry creates and initializes a new templates registry with
- * some defaults (eg. global "raw" template function for unescaped HTML).
- *
- * Use the Registry.Load* methods to load templates into the registry.
- */
- (): (Registry)
- }
- /**
- * Registry defines a templates registry that is safe to be used by multiple goroutines.
- *
- * Use the Registry.Load* methods to load templates into the registry.
- */
- interface Registry {
- }
- interface Registry {
- /**
- * AddFuncs registers new global template functions.
- *
- * The key of each map entry is the function name that will be used in the templates.
- * If a function with the map entry name already exists it will be replaced with the new one.
- *
- * The value of each map entry is a function that must have either a
- * single return value, or two return values of which the second has type error.
- *
- * Example:
- *
- * r.AddFuncs(map[string]any{
- * ```
- * "toUpper": func(str string) string {
- * return strings.ToUppser(str)
- * },
- * ...
- * ```
- * })
- */
- addFuncs(funcs: _TygojaDict): (Registry)
- }
- interface Registry {
- /**
- * LoadFiles caches (if not already) the specified filenames set as a
- * single template and returns a ready to use Renderer instance.
- *
- * There must be at least 1 filename specified.
- */
- loadFiles(...filenames: string[]): (Renderer)
- }
- interface Registry {
- /**
- * LoadString caches (if not already) the specified inline string as a
- * single template and returns a ready to use Renderer instance.
- */
- loadString(text: string): (Renderer)
- }
- interface Registry {
- /**
- * LoadFS caches (if not already) the specified fs and globPatterns
- * pair as single template and returns a ready to use Renderer instance.
- *
- * There must be at least 1 file matching the provided globPattern(s)
- * (note that most file names serves as glob patterns matching themselves).
- */
- loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer)
- }
- /**
- * Renderer defines a single parsed template.
- */
- interface Renderer {
- }
- interface Renderer {
- /**
- * Render executes the template with the specified data as the dot object
- * and returns the result as plain string.
- */
- render(data: any): string
- }
-}
-
/**
* Package io provides basic interfaces to I/O primitives.
* Its primary job is to wrap existing implementations of such primitives,
@@ -6884,6 +6969,21 @@ namespace io {
[key:string]: any;
read(p: string|Array): number
}
+ /**
+ * Writer is the interface that wraps the basic Write method.
+ *
+ * Write writes len(p) bytes from p to the underlying data stream.
+ * It returns the number of bytes written from p (0 <= n <= len(p))
+ * and any error encountered that caused the write to stop early.
+ * Write must return a non-nil error if it returns n < len(p).
+ * Write must not modify the slice data, even temporarily.
+ *
+ * Implementations must not retain p.
+ */
+ interface Writer {
+ [key:string]: any;
+ write(p: string|Array): number
+ }
/**
* ReadSeekCloser is the interface that groups the basic Read, Seek and Close
* methods.
@@ -6902,7 +7002,7 @@ namespace bytes {
* A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
* io.ByteScanner, and io.RuneScanner interfaces by reading from
* a byte slice.
- * Unlike a Buffer, a Reader is read-only and supports seeking.
+ * Unlike a [Buffer], a Reader is read-only and supports seeking.
* The zero value for Reader operates like a Reader of an empty slice.
*/
interface Reader {
@@ -6917,62 +7017,62 @@ namespace bytes {
interface Reader {
/**
* Size returns the original length of the underlying byte slice.
- * Size is the number of bytes available for reading via ReadAt.
- * The result is unaffected by any method calls except Reset.
+ * Size is the number of bytes available for reading via [Reader.ReadAt].
+ * The result is unaffected by any method calls except [Reader.Reset].
*/
size(): number
}
interface Reader {
/**
- * Read implements the io.Reader interface.
+ * Read implements the [io.Reader] interface.
*/
read(b: string|Array): number
}
interface Reader {
/**
- * ReadAt implements the io.ReaderAt interface.
+ * ReadAt implements the [io.ReaderAt] interface.
*/
readAt(b: string|Array, off: number): number
}
interface Reader {
/**
- * ReadByte implements the io.ByteReader interface.
+ * ReadByte implements the [io.ByteReader] interface.
*/
readByte(): number
}
interface Reader {
/**
- * UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
+ * UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface.
*/
unreadByte(): void
}
interface Reader {
/**
- * ReadRune implements the io.RuneReader interface.
+ * ReadRune implements the [io.RuneReader] interface.
*/
readRune(): [number, number]
}
interface Reader {
/**
- * UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
+ * UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface.
*/
unreadRune(): void
}
interface Reader {
/**
- * Seek implements the io.Seeker interface.
+ * Seek implements the [io.Seeker] interface.
*/
seek(offset: number, whence: number): number
}
interface Reader {
/**
- * WriteTo implements the io.WriterTo interface.
+ * WriteTo implements the [io.WriterTo] interface.
*/
writeTo(w: io.Writer): number
}
interface Reader {
/**
- * Reset resets the Reader to be reading from b.
+ * Reset resets the [Reader.Reader] to be reading from b.
*/
reset(b: string|Array): void
}
@@ -6995,11 +7095,11 @@ namespace bytes {
* err is an operating system error describing the failure.
* On most systems, that error has type syscall.Errno.
*
- * Deprecated: this package is locked down. Callers should use the
- * corresponding package in the golang.org/x/sys repository instead.
- * That is also where updates required by new systems or versions
- * should be applied. See https://golang.org/s/go1.4-syscall for more
- * information.
+ * NOTE: Most of the functions, types, and constants defined in
+ * this package are also available in the [golang.org/x/sys] package.
+ * That package has more system call support than this one,
+ * and most new code should prefer that package where possible.
+ * See https://golang.org/s/go1.4-syscall for more information.
*/
namespace syscall {
interface SysProcAttr {
@@ -7024,8 +7124,8 @@ namespace syscall {
* This is only meaningful if Setsid is true.
*/
setctty: boolean
- noctty: boolean // Detach fd 0 from controlling terminal
- ctty: number // Controlling TTY fd
+ noctty: boolean // Detach fd 0 from controlling terminal.
+ ctty: number // Controlling TTY fd.
/**
* Foreground places the child process group in the foreground.
* This implies Setpgid. The Ctty field must be set to
@@ -7042,8 +7142,8 @@ namespace syscall {
* There are more details at https://go.dev/issue/27505.
*/
pdeathsig: Signal
- cloneflags: number // Flags for clone calls (Linux only)
- unshareflags: number // Flags for unshare calls (Linux only)
+ cloneflags: number // Flags for clone calls.
+ unshareflags: number // Flags for unshare calls.
uidMappings: Array // User ID mappings for user namespaces.
gidMappings: Array // Group ID mappings for user namespaces.
/**
@@ -7053,9 +7153,15 @@ namespace syscall {
* users this should be set to false for mappings work.
*/
gidMappingsEnableSetgroups: boolean
- ambientCaps: Array // Ambient capabilities (Linux only)
+ ambientCaps: Array // Ambient capabilities.
useCgroupFD: boolean // Whether to make use of the CgroupFD field.
cgroupFD: number // File descriptor of a cgroup to put the new process into.
+ /**
+ * PidFD, if not nil, is used to store the pidfd of a child, if the
+ * functionality is supported by the kernel, or -1. Note *PidFD is
+ * changed only if the process starts successfully.
+ */
+ pidFD?: number
}
// @ts-ignore
import errorspkg = errors
@@ -7199,6 +7305,14 @@ namespace syscall {
* For debugging, the result of t.String does include the monotonic
* clock reading if present. If t != u because of different monotonic clock readings,
* that difference will be visible when printing t.String() and u.String().
+ *
+ * # Timer Resolution
+ *
+ * Timer resolution varies depending on the Go runtime, the operating system
+ * and the underlying hardware.
+ * On Unix, the resolution is approximately 1ms.
+ * On Windows, the default resolution is approximately 16ms, but
+ * a higher resolution may be requested using [golang.org/x/sys/windows.TimeBeginPeriod].
*/
namespace time {
interface Time {
@@ -7263,12 +7377,10 @@ namespace time {
* As this time is unlikely to come up in practice, the IsZero method gives
* a simple way of detecting a time that has not been initialized explicitly.
*
- * Each Time has associated with it a Location, consulted when computing the
- * presentation form of the time, such as in the Format, Hour, and Year methods.
- * The methods Local, UTC, and In return a Time with a specific location.
- * Changing the location in this way changes only the presentation; it does not
- * change the instant in time being denoted and therefore does not affect the
- * computations described in earlier paragraphs.
+ * Each time has an associated Location. The methods Local, UTC, and In return a
+ * Time with a specific Location. Changing the Location of a Time value with
+ * these methods does not change the actual instant it represents, only the time
+ * zone in which to interpret it.
*
* Representations of a Time value saved by the GobEncode, MarshalBinary,
* MarshalJSON, and MarshalText methods store the Time.Location's offset, but not
@@ -7502,6 +7614,15 @@ namespace time {
* For example, AddDate(-1, 2, 3) applied to January 1, 2011
* returns March 4, 2010.
*
+ * Note that dates are fundamentally coupled to timezones, and calendrical
+ * periods like days don't have fixed durations. AddDate uses the Location of
+ * the Time value to determine these durations. That means that the same
+ * AddDate arguments can produce a different shift in absolute time depending on
+ * the base Time value and its Location. For example, AddDate(0, 0, 1) applied
+ * to 12:00 on March 27 always returns 12:00 on March 28. At some locations and
+ * in some years this is a 24 hour shift. In others it's a 23 hour shift due to
+ * daylight savings time transitions.
+ *
* AddDate normalizes its result in the same way that Date does,
* so, for example, adding one month to October 31 yields
* December 1, the normalized form for November 31.
@@ -7685,220 +7806,63 @@ namespace time {
}
/**
- * Package context defines the Context type, which carries deadlines,
- * cancellation signals, and other request-scoped values across API boundaries
- * and between processes.
- *
- * Incoming requests to a server should create a [Context], and outgoing
- * calls to servers should accept a Context. The chain of function
- * calls between them must propagate the Context, optionally replacing
- * it with a derived Context created using [WithCancel], [WithDeadline],
- * [WithTimeout], or [WithValue]. When a Context is canceled, all
- * Contexts derived from it are also canceled.
- *
- * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a
- * Context (the parent) and return a derived Context (the child) and a
- * [CancelFunc]. Calling the CancelFunc cancels the child and its
- * children, removes the parent's reference to the child, and stops
- * any associated timers. Failing to call the CancelFunc leaks the
- * child and its children until the parent is canceled or the timer
- * fires. The go vet tool checks that CancelFuncs are used on all
- * control-flow paths.
- *
- * The [WithCancelCause] function returns a [CancelCauseFunc], which
- * takes an error and records it as the cancellation cause. Calling
- * [Cause] on the canceled context or any of its children retrieves
- * the cause. If no cause is specified, Cause(ctx) returns the same
- * value as ctx.Err().
- *
- * Programs that use Contexts should follow these rules to keep interfaces
- * consistent across packages and enable static analysis tools to check context
- * propagation:
- *
- * Do not store Contexts inside a struct type; instead, pass a Context
- * explicitly to each function that needs it. The Context should be the first
- * parameter, typically named ctx:
- *
- * ```
- * func DoSomething(ctx context.Context, arg Arg) error {
- * // ... use ctx ...
- * }
- * ```
- *
- * Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
- * if you are unsure about which Context to use.
- *
- * Use context Values only for request-scoped data that transits processes and
- * APIs, not for passing optional parameters to functions.
- *
- * The same Context may be passed to functions running in different goroutines;
- * Contexts are safe for simultaneous use by multiple goroutines.
+ * Package fs defines basic interfaces to a file system.
+ * A file system can be provided by the host operating system
+ * but also by other packages.
*
- * See https://blog.golang.org/context for example code for a server that uses
- * Contexts.
+ * See the [testing/fstest] package for support with testing
+ * implementations of file systems.
*/
-namespace context {
+namespace fs {
/**
- * A Context carries a deadline, a cancellation signal, and other values across
- * API boundaries.
+ * An FS provides access to a hierarchical file system.
*
- * Context's methods may be called by multiple goroutines simultaneously.
+ * The FS interface is the minimum implementation required of the file system.
+ * A file system may implement additional interfaces,
+ * such as [ReadFileFS], to provide additional or optimized functionality.
+ *
+ * [testing/fstest.TestFS] may be used to test implementations of an FS for
+ * correctness.
*/
- interface Context {
+ interface FS {
[key:string]: any;
/**
- * Deadline returns the time when work done on behalf of this context
- * should be canceled. Deadline returns ok==false when no deadline is
- * set. Successive calls to Deadline return the same results.
- */
- deadline(): [time.Time, boolean]
- /**
- * Done returns a channel that's closed when work done on behalf of this
- * context should be canceled. Done may return nil if this context can
- * never be canceled. Successive calls to Done return the same value.
- * The close of the Done channel may happen asynchronously,
- * after the cancel function returns.
- *
- * WithCancel arranges for Done to be closed when cancel is called;
- * WithDeadline arranges for Done to be closed when the deadline
- * expires; WithTimeout arranges for Done to be closed when the timeout
- * elapses.
- *
- * Done is provided for use in select statements:
+ * Open opens the named file.
*
- * // Stream generates values with DoSomething and sends them to out
- * // until DoSomething returns an error or ctx.Done is closed.
- * func Stream(ctx context.Context, out chan<- Value) error {
- * for {
- * v, err := DoSomething(ctx)
- * if err != nil {
- * return err
- * }
- * select {
- * case <-ctx.Done():
- * return ctx.Err()
- * case out <- v:
- * }
- * }
- * }
+ * When Open returns an error, it should be of type *PathError
+ * with the Op field set to "open", the Path field set to name,
+ * and the Err field describing the problem.
*
- * See https://blog.golang.org/pipelines for more examples of how to use
- * a Done channel for cancellation.
+ * Open should reject attempts to open names that do not satisfy
+ * ValidPath(name), returning a *PathError with Err set to
+ * ErrInvalid or ErrNotExist.
*/
- done(): undefined
+ open(name: string): File
+ }
+ /**
+ * A File provides access to a single file.
+ * The File interface is the minimum implementation required of the file.
+ * Directory files should also implement [ReadDirFile].
+ * A file may implement [io.ReaderAt] or [io.Seeker] as optimizations.
+ */
+ interface File {
+ [key:string]: any;
+ stat(): FileInfo
+ read(_arg0: string|Array): number
+ close(): void
+ }
+ /**
+ * A DirEntry is an entry read from a directory
+ * (using the [ReadDir] function or a [ReadDirFile]'s ReadDir method).
+ */
+ interface DirEntry {
+ [key:string]: any;
/**
- * If Done is not yet closed, Err returns nil.
- * If Done is closed, Err returns a non-nil error explaining why:
- * Canceled if the context was canceled
- * or DeadlineExceeded if the context's deadline passed.
- * After Err returns a non-nil error, successive calls to Err return the same error.
+ * Name returns the name of the file (or subdirectory) described by the entry.
+ * This name is only the final element of the path (the base name), not the entire path.
+ * For example, Name would return "hello.go" not "home/gopher/hello.go".
*/
- err(): void
- /**
- * Value returns the value associated with this context for key, or nil
- * if no value is associated with key. Successive calls to Value with
- * the same key returns the same result.
- *
- * Use context values only for request-scoped data that transits
- * processes and API boundaries, not for passing optional parameters to
- * functions.
- *
- * A key identifies a specific value in a Context. Functions that wish
- * to store values in Context typically allocate a key in a global
- * variable then use that key as the argument to context.WithValue and
- * Context.Value. A key can be any type that supports equality;
- * packages should define keys as an unexported type to avoid
- * collisions.
- *
- * Packages that define a Context key should provide type-safe accessors
- * for the values stored using that key:
- *
- * ```
- * // Package user defines a User type that's stored in Contexts.
- * package user
- *
- * import "context"
- *
- * // User is the type of value stored in the Contexts.
- * type User struct {...}
- *
- * // key is an unexported type for keys defined in this package.
- * // This prevents collisions with keys defined in other packages.
- * type key int
- *
- * // userKey is the key for user.User values in Contexts. It is
- * // unexported; clients use user.NewContext and user.FromContext
- * // instead of using this key directly.
- * var userKey key
- *
- * // NewContext returns a new Context that carries value u.
- * func NewContext(ctx context.Context, u *User) context.Context {
- * return context.WithValue(ctx, userKey, u)
- * }
- *
- * // FromContext returns the User value stored in ctx, if any.
- * func FromContext(ctx context.Context) (*User, bool) {
- * u, ok := ctx.Value(userKey).(*User)
- * return u, ok
- * }
- * ```
- */
- value(key: any): any
- }
-}
-
-/**
- * Package fs defines basic interfaces to a file system.
- * A file system can be provided by the host operating system
- * but also by other packages.
- */
-namespace fs {
- /**
- * An FS provides access to a hierarchical file system.
- *
- * The FS interface is the minimum implementation required of the file system.
- * A file system may implement additional interfaces,
- * such as ReadFileFS, to provide additional or optimized functionality.
- */
- interface FS {
- [key:string]: any;
- /**
- * Open opens the named file.
- *
- * When Open returns an error, it should be of type *PathError
- * with the Op field set to "open", the Path field set to name,
- * and the Err field describing the problem.
- *
- * Open should reject attempts to open names that do not satisfy
- * ValidPath(name), returning a *PathError with Err set to
- * ErrInvalid or ErrNotExist.
- */
- open(name: string): File
- }
- /**
- * A File provides access to a single file.
- * The File interface is the minimum implementation required of the file.
- * Directory files should also implement ReadDirFile.
- * A file may implement io.ReaderAt or io.Seeker as optimizations.
- */
- interface File {
- [key:string]: any;
- stat(): FileInfo
- read(_arg0: string|Array): number
- close(): void
- }
- /**
- * A DirEntry is an entry read from a directory
- * (using the ReadDir function or a ReadDirFile's ReadDir method).
- */
- interface DirEntry {
- [key:string]: any;
- /**
- * Name returns the name of the file (or subdirectory) described by the entry.
- * This name is only the final element of the path (the base name), not the entire path.
- * For example, Name would return "hello.go" not "home/gopher/hello.go".
- */
- name(): string
+ name(): string
/**
* IsDir reports whether the entry describes a directory.
*/
@@ -7919,7 +7883,7 @@ namespace fs {
info(): FileInfo
}
/**
- * A FileInfo describes a file and is returned by Stat.
+ * A FileInfo describes a file and is returned by [Stat].
*/
interface FileInfo {
[key:string]: any;
@@ -7935,7 +7899,7 @@ namespace fs {
* The bits have the same definition on all systems, so that
* information about files can be moved from one system
* to another portably. Not all bits apply to all systems.
- * The only required bit is ModeDir for directories.
+ * The only required bit is [ModeDir] for directories.
*/
interface FileMode extends Number{}
interface FileMode {
@@ -7944,7 +7908,7 @@ namespace fs {
interface FileMode {
/**
* IsDir reports whether m describes a directory.
- * That is, it tests for the ModeDir bit being set in m.
+ * That is, it tests for the [ModeDir] bit being set in m.
*/
isDir(): boolean
}
@@ -7957,13 +7921,13 @@ namespace fs {
}
interface FileMode {
/**
- * Perm returns the Unix permission bits in m (m & ModePerm).
+ * Perm returns the Unix permission bits in m (m & [ModePerm]).
*/
perm(): FileMode
}
interface FileMode {
/**
- * Type returns type bits in m (m & ModeType).
+ * Type returns type bits in m (m & [ModeType]).
*/
type(): FileMode
}
@@ -7988,51 +7952,51 @@ namespace fs {
timeout(): boolean
}
/**
- * WalkDirFunc is the type of the function called by WalkDir to visit
+ * WalkDirFunc is the type of the function called by [WalkDir] to visit
* each file or directory.
*
- * The path argument contains the argument to WalkDir as a prefix.
+ * The path argument contains the argument to [WalkDir] as a prefix.
* That is, if WalkDir is called with root argument "dir" and finds a file
* named "a" in that directory, the walk function will be called with
* argument "dir/a".
*
- * The d argument is the fs.DirEntry for the named path.
+ * The d argument is the [DirEntry] for the named path.
*
- * The error result returned by the function controls how WalkDir
- * continues. If the function returns the special value SkipDir, WalkDir
+ * The error result returned by the function controls how [WalkDir]
+ * continues. If the function returns the special value [SkipDir], WalkDir
* skips the current directory (path if d.IsDir() is true, otherwise
* path's parent directory). If the function returns the special value
- * SkipAll, WalkDir skips all remaining files and directories. Otherwise,
+ * [SkipAll], WalkDir skips all remaining files and directories. Otherwise,
* if the function returns a non-nil error, WalkDir stops entirely and
* returns that error.
*
* The err argument reports an error related to path, signaling that
- * WalkDir will not walk into that directory. The function can decide how
+ * [WalkDir] will not walk into that directory. The function can decide how
* to handle that error; as described earlier, returning the error will
* cause WalkDir to stop walking the entire tree.
*
- * WalkDir calls the function with a non-nil err argument in two cases.
+ * [WalkDir] calls the function with a non-nil err argument in two cases.
*
- * First, if the initial fs.Stat on the root directory fails, WalkDir
+ * First, if the initial [Stat] on the root directory fails, WalkDir
* calls the function with path set to root, d set to nil, and err set to
- * the error from fs.Stat.
+ * the error from [fs.Stat].
*
- * Second, if a directory's ReadDir method fails, WalkDir calls the
+ * Second, if a directory's ReadDir method (see [ReadDirFile]) fails, WalkDir calls the
* function with path set to the directory's path, d set to an
- * fs.DirEntry describing the directory, and err set to the error from
+ * [DirEntry] describing the directory, and err set to the error from
* ReadDir. In this second case, the function is called twice with the
* path of the directory: the first call is before the directory read is
* attempted and has err set to nil, giving the function a chance to
- * return SkipDir or SkipAll and avoid the ReadDir entirely. The second call
+ * return [SkipDir] or [SkipAll] and avoid the ReadDir entirely. The second call
* is after a failed ReadDir and reports the error from ReadDir.
* (If ReadDir succeeds, there is no second call.)
*
- * The differences between WalkDirFunc compared to filepath.WalkFunc are:
+ * The differences between WalkDirFunc compared to [path/filepath.WalkFunc] are:
*
* ```
- * - The second argument has type fs.DirEntry instead of fs.FileInfo.
- * - The function is called before reading a directory, to allow SkipDir
- * or SkipAll to bypass the directory read entirely or skip all remaining
+ * - The second argument has type [DirEntry] instead of [FileInfo].
+ * - The function is called before reading a directory, to allow [SkipDir]
+ * or [SkipAll] to bypass the directory read entirely or skip all remaining
* files and directories respectively.
* - If a directory read fails, the function is called a second time
* for that directory to report the error.
@@ -8042,8736 +8006,8208 @@ namespace fs {
}
/**
- * Package multipart implements MIME multipart parsing, as defined in RFC
- * 2046.
- *
- * The implementation is sufficient for HTTP (RFC 2388) and the multipart
- * bodies generated by popular browsers.
- *
- * # Limits
- *
- * To protect against malicious inputs, this package sets limits on the size
- * of the MIME data it processes.
- *
- * Reader.NextPart and Reader.NextRawPart limit the number of headers in a
- * part to 10000 and Reader.ReadForm limits the total number of headers in all
- * FileHeaders to 10000.
- * These limits may be adjusted with the GODEBUG=multipartmaxheaders=
- * setting.
- *
- * Reader.ReadForm further limits the number of parts in a form to 1000.
- * This limit may be adjusted with the GODEBUG=multipartmaxparts=
- * setting.
- */
-/**
- * Copyright 2023 The Go Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style
- * license that can be found in the LICENSE file.
- */
-namespace multipart {
- /**
- * A FileHeader describes a file part of a multipart request.
- */
- interface FileHeader {
- filename: string
- header: textproto.MIMEHeader
- size: number
- }
- interface FileHeader {
- /**
- * Open opens and returns the FileHeader's associated File.
- */
- open(): File
- }
-}
-
-/**
- * Package http provides HTTP client and server implementations.
- *
- * Get, Head, Post, and PostForm make HTTP (or HTTPS) requests:
- *
- * ```
- * resp, err := http.Get("http://example.com/")
- * ...
- * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
- * ...
- * resp, err := http.PostForm("http://example.com/form",
- * url.Values{"key": {"Value"}, "id": {"123"}})
- * ```
- *
- * The caller must close the response body when finished with it:
- *
- * ```
- * resp, err := http.Get("http://example.com/")
- * if err != nil {
- * // handle error
- * }
- * defer resp.Body.Close()
- * body, err := io.ReadAll(resp.Body)
- * // ...
- * ```
- *
- * # Clients and Transports
+ * Package context defines the Context type, which carries deadlines,
+ * cancellation signals, and other request-scoped values across API boundaries
+ * and between processes.
*
- * For control over HTTP client headers, redirect policy, and other
- * settings, create a Client:
+ * Incoming requests to a server should create a [Context], and outgoing
+ * calls to servers should accept a Context. The chain of function
+ * calls between them must propagate the Context, optionally replacing
+ * it with a derived Context created using [WithCancel], [WithDeadline],
+ * [WithTimeout], or [WithValue]. When a Context is canceled, all
+ * Contexts derived from it are also canceled.
*
- * ```
- * client := &http.Client{
- * CheckRedirect: redirectPolicyFunc,
- * }
+ * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a
+ * Context (the parent) and return a derived Context (the child) and a
+ * [CancelFunc]. Calling the CancelFunc cancels the child and its
+ * children, removes the parent's reference to the child, and stops
+ * any associated timers. Failing to call the CancelFunc leaks the
+ * child and its children until the parent is canceled or the timer
+ * fires. The go vet tool checks that CancelFuncs are used on all
+ * control-flow paths.
*
- * resp, err := client.Get("http://example.com")
- * // ...
+ * The [WithCancelCause] function returns a [CancelCauseFunc], which
+ * takes an error and records it as the cancellation cause. Calling
+ * [Cause] on the canceled context or any of its children retrieves
+ * the cause. If no cause is specified, Cause(ctx) returns the same
+ * value as ctx.Err().
*
- * req, err := http.NewRequest("GET", "http://example.com", nil)
- * // ...
- * req.Header.Add("If-None-Match", `W/"wyzzy"`)
- * resp, err := client.Do(req)
- * // ...
- * ```
+ * Programs that use Contexts should follow these rules to keep interfaces
+ * consistent across packages and enable static analysis tools to check context
+ * propagation:
*
- * For control over proxies, TLS configuration, keep-alives,
- * compression, and other settings, create a Transport:
+ * Do not store Contexts inside a struct type; instead, pass a Context
+ * explicitly to each function that needs it. The Context should be the first
+ * parameter, typically named ctx:
*
* ```
- * tr := &http.Transport{
- * MaxIdleConns: 10,
- * IdleConnTimeout: 30 * time.Second,
- * DisableCompression: true,
+ * func DoSomething(ctx context.Context, arg Arg) error {
+ * // ... use ctx ...
* }
- * client := &http.Client{Transport: tr}
- * resp, err := client.Get("https://example.com")
- * ```
- *
- * Clients and Transports are safe for concurrent use by multiple
- * goroutines and for efficiency should only be created once and re-used.
- *
- * # Servers
- *
- * ListenAndServe starts an HTTP server with a given address and handler.
- * The handler is usually nil, which means to use DefaultServeMux.
- * Handle and HandleFunc add handlers to DefaultServeMux:
- *
- * ```
- * http.Handle("/foo", fooHandler)
- *
- * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
- * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
- * })
- *
- * log.Fatal(http.ListenAndServe(":8080", nil))
* ```
*
- * More control over the server's behavior is available by creating a
- * custom Server:
+ * Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
+ * if you are unsure about which Context to use.
*
- * ```
- * s := &http.Server{
- * Addr: ":8080",
- * Handler: myHandler,
- * ReadTimeout: 10 * time.Second,
- * WriteTimeout: 10 * time.Second,
- * MaxHeaderBytes: 1 << 20,
- * }
- * log.Fatal(s.ListenAndServe())
- * ```
+ * Use context Values only for request-scoped data that transits processes and
+ * APIs, not for passing optional parameters to functions.
*
- * # HTTP/2
+ * The same Context may be passed to functions running in different goroutines;
+ * Contexts are safe for simultaneous use by multiple goroutines.
*
- * Starting with Go 1.6, the http package has transparent support for the
- * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
- * can do so by setting Transport.TLSNextProto (for clients) or
- * Server.TLSNextProto (for servers) to a non-nil, empty
- * map. Alternatively, the following GODEBUG settings are
- * currently supported:
- *
- * ```
- * GODEBUG=http2client=0 # disable HTTP/2 client support
- * GODEBUG=http2server=0 # disable HTTP/2 server support
- * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs
- * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps
- * ```
- *
- * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug
- *
- * The http package's Transport and Server both automatically enable
- * HTTP/2 support for simple configurations. To enable HTTP/2 for more
- * complex configurations, to use lower-level HTTP/2 features, or to use
- * a newer version of Go's http2 package, import "golang.org/x/net/http2"
- * directly and use its ConfigureTransport and/or ConfigureServer
- * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2
- * package takes precedence over the net/http package's built-in HTTP/2
- * support.
+ * See https://blog.golang.org/context for example code for a server that uses
+ * Contexts.
*/
-namespace http {
- // @ts-ignore
- import mathrand = rand
- // @ts-ignore
- import urlpkg = url
+namespace context {
/**
- * A Request represents an HTTP request received by a server
- * or to be sent by a client.
+ * A Context carries a deadline, a cancellation signal, and other values across
+ * API boundaries.
*
- * The field semantics differ slightly between client and server
- * usage. In addition to the notes on the fields below, see the
- * documentation for Request.Write and RoundTripper.
+ * Context's methods may be called by multiple goroutines simultaneously.
*/
- interface Request {
+ interface Context {
+ [key:string]: any;
/**
- * Method specifies the HTTP method (GET, POST, PUT, etc.).
- * For client requests, an empty string means GET.
- *
- * Go's HTTP client does not support sending a request with
- * the CONNECT method. See the documentation on Transport for
- * details.
+ * Deadline returns the time when work done on behalf of this context
+ * should be canceled. Deadline returns ok==false when no deadline is
+ * set. Successive calls to Deadline return the same results.
*/
- method: string
+ deadline(): [time.Time, boolean]
/**
- * URL specifies either the URI being requested (for server
- * requests) or the URL to access (for client requests).
+ * Done returns a channel that's closed when work done on behalf of this
+ * context should be canceled. Done may return nil if this context can
+ * never be canceled. Successive calls to Done return the same value.
+ * The close of the Done channel may happen asynchronously,
+ * after the cancel function returns.
*
- * For server requests, the URL is parsed from the URI
- * supplied on the Request-Line as stored in RequestURI. For
- * most requests, fields other than Path and RawQuery will be
- * empty. (See RFC 7230, Section 5.3)
+ * WithCancel arranges for Done to be closed when cancel is called;
+ * WithDeadline arranges for Done to be closed when the deadline
+ * expires; WithTimeout arranges for Done to be closed when the timeout
+ * elapses.
*
- * For client requests, the URL's Host specifies the server to
- * connect to, while the Request's Host field optionally
- * specifies the Host header value to send in the HTTP
- * request.
+ * Done is provided for use in select statements:
+ *
+ * // Stream generates values with DoSomething and sends them to out
+ * // until DoSomething returns an error or ctx.Done is closed.
+ * func Stream(ctx context.Context, out chan<- Value) error {
+ * for {
+ * v, err := DoSomething(ctx)
+ * if err != nil {
+ * return err
+ * }
+ * select {
+ * case <-ctx.Done():
+ * return ctx.Err()
+ * case out <- v:
+ * }
+ * }
+ * }
+ *
+ * See https://blog.golang.org/pipelines for more examples of how to use
+ * a Done channel for cancellation.
*/
- url?: url.URL
+ done(): undefined
/**
- * The protocol version for incoming server requests.
- *
- * For client requests, these fields are ignored. The HTTP
- * client code always uses either HTTP/1.1 or HTTP/2.
- * See the docs on Transport for details.
+ * If Done is not yet closed, Err returns nil.
+ * If Done is closed, Err returns a non-nil error explaining why:
+ * Canceled if the context was canceled
+ * or DeadlineExceeded if the context's deadline passed.
+ * After Err returns a non-nil error, successive calls to Err return the same error.
*/
- proto: string // "HTTP/1.0"
- protoMajor: number // 1
- protoMinor: number // 0
+ err(): void
/**
- * Header contains the request header fields either received
- * by the server or to be sent by the client.
+ * Value returns the value associated with this context for key, or nil
+ * if no value is associated with key. Successive calls to Value with
+ * the same key returns the same result.
*
- * If a server received a request with header lines,
+ * Use context values only for request-scoped data that transits
+ * processes and API boundaries, not for passing optional parameters to
+ * functions.
*
- * ```
- * Host: example.com
- * accept-encoding: gzip, deflate
- * Accept-Language: en-us
- * fOO: Bar
- * foo: two
- * ```
+ * A key identifies a specific value in a Context. Functions that wish
+ * to store values in Context typically allocate a key in a global
+ * variable then use that key as the argument to context.WithValue and
+ * Context.Value. A key can be any type that supports equality;
+ * packages should define keys as an unexported type to avoid
+ * collisions.
*
- * then
+ * Packages that define a Context key should provide type-safe accessors
+ * for the values stored using that key:
*
* ```
- * Header = map[string][]string{
- * "Accept-Encoding": {"gzip, deflate"},
- * "Accept-Language": {"en-us"},
- * "Foo": {"Bar", "two"},
- * }
- * ```
+ * // Package user defines a User type that's stored in Contexts.
+ * package user
*
- * For incoming requests, the Host header is promoted to the
- * Request.Host field and removed from the Header map.
+ * import "context"
*
- * HTTP defines that header names are case-insensitive. The
- * request parser implements this by using CanonicalHeaderKey,
- * making the first character and any characters following a
- * hyphen uppercase and the rest lowercase.
+ * // User is the type of value stored in the Contexts.
+ * type User struct {...}
*
- * For client requests, certain headers such as Content-Length
- * and Connection are automatically written when needed and
- * values in Header may be ignored. See the documentation
- * for the Request.Write method.
- */
- header: Header
- /**
- * Body is the request's body.
+ * // key is an unexported type for keys defined in this package.
+ * // This prevents collisions with keys defined in other packages.
+ * type key int
*
- * For client requests, a nil body means the request has no
- * body, such as a GET request. The HTTP Client's Transport
- * is responsible for calling the Close method.
+ * // userKey is the key for user.User values in Contexts. It is
+ * // unexported; clients use user.NewContext and user.FromContext
+ * // instead of using this key directly.
+ * var userKey key
*
- * For server requests, the Request Body is always non-nil
- * but will return EOF immediately when no body is present.
- * The Server will close the request body. The ServeHTTP
- * Handler does not need to.
+ * // NewContext returns a new Context that carries value u.
+ * func NewContext(ctx context.Context, u *User) context.Context {
+ * return context.WithValue(ctx, userKey, u)
+ * }
*
- * Body must allow Read to be called concurrently with Close.
- * In particular, calling Close should unblock a Read waiting
- * for input.
+ * // FromContext returns the User value stored in ctx, if any.
+ * func FromContext(ctx context.Context) (*User, bool) {
+ * u, ok := ctx.Value(userKey).(*User)
+ * return u, ok
+ * }
+ * ```
*/
- body: io.ReadCloser
+ value(key: any): any
+ }
+}
+
+/**
+ * Package sql provides a generic interface around SQL (or SQL-like)
+ * databases.
+ *
+ * The sql package must be used in conjunction with a database driver.
+ * See https://golang.org/s/sqldrivers for a list of drivers.
+ *
+ * Drivers that do not support context cancellation will not return until
+ * after the query is completed.
+ *
+ * For usage examples, see the wiki page at
+ * https://golang.org/s/sqlwiki.
+ */
+namespace sql {
+ /**
+ * TxOptions holds the transaction options to be used in [DB.BeginTx].
+ */
+ interface TxOptions {
/**
- * GetBody defines an optional func to return a new copy of
- * Body. It is used for client requests when a redirect requires
- * reading the body more than once. Use of GetBody still
- * requires setting Body.
- *
- * For server requests, it is unused.
+ * Isolation is the transaction isolation level.
+ * If zero, the driver or database's default level is used.
*/
- getBody: () => io.ReadCloser
+ isolation: IsolationLevel
+ readOnly: boolean
+ }
+ /**
+ * DB is a database handle representing a pool of zero or more
+ * underlying connections. It's safe for concurrent use by multiple
+ * goroutines.
+ *
+ * The sql package creates and frees connections automatically; it
+ * also maintains a free pool of idle connections. If the database has
+ * a concept of per-connection state, such state can be reliably observed
+ * within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the
+ * returned [Tx] is bound to a single connection. Once [Tx.Commit] or
+ * [Tx.Rollback] is called on the transaction, that transaction's
+ * connection is returned to [DB]'s idle connection pool. The pool size
+ * can be controlled with [DB.SetMaxIdleConns].
+ */
+ interface DB {
+ }
+ interface DB {
/**
- * ContentLength records the length of the associated content.
- * The value -1 indicates that the length is unknown.
- * Values >= 0 indicate that the given number of bytes may
- * be read from Body.
- *
- * For client requests, a value of 0 with a non-nil Body is
- * also treated as unknown.
+ * PingContext verifies a connection to the database is still alive,
+ * establishing a connection if necessary.
*/
- contentLength: number
+ pingContext(ctx: context.Context): void
+ }
+ interface DB {
/**
- * TransferEncoding lists the transfer encodings from outermost to
- * innermost. An empty list denotes the "identity" encoding.
- * TransferEncoding can usually be ignored; chunked encoding is
- * automatically added and removed as necessary when sending and
- * receiving requests.
+ * Ping verifies a connection to the database is still alive,
+ * establishing a connection if necessary.
+ *
+ * Ping uses [context.Background] internally; to specify the context, use
+ * [DB.PingContext].
*/
- transferEncoding: Array
+ ping(): void
+ }
+ interface DB {
/**
- * Close indicates whether to close the connection after
- * replying to this request (for servers) or after sending this
- * request and reading its response (for clients).
+ * Close closes the database and prevents new queries from starting.
+ * Close then waits for all queries that have started processing on the server
+ * to finish.
*
- * For server requests, the HTTP server handles this automatically
- * and this field is not needed by Handlers.
- *
- * For client requests, setting this field prevents re-use of
- * TCP connections between requests to the same hosts, as if
- * Transport.DisableKeepAlives were set.
+ * It is rare to Close a [DB], as the [DB] handle is meant to be
+ * long-lived and shared between many goroutines.
*/
- close: boolean
+ close(): void
+ }
+ interface DB {
/**
- * For server requests, Host specifies the host on which the
- * URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this
- * is either the value of the "Host" header or the host name
- * given in the URL itself. For HTTP/2, it is the value of the
- * ":authority" pseudo-header field.
- * It may be of the form "host:port". For international domain
- * names, Host may be in Punycode or Unicode form. Use
- * golang.org/x/net/idna to convert it to either format if
- * needed.
- * To prevent DNS rebinding attacks, server Handlers should
- * validate that the Host header has a value for which the
- * Handler considers itself authoritative. The included
- * ServeMux supports patterns registered to particular host
- * names and thus protects its registered Handlers.
+ * SetMaxIdleConns sets the maximum number of connections in the idle
+ * connection pool.
*
- * For client requests, Host optionally overrides the Host
- * header to send. If empty, the Request.Write method uses
- * the value of URL.Host. Host may contain an international
- * domain name.
- */
- host: string
- /**
- * Form contains the parsed form data, including both the URL
- * field's query parameters and the PATCH, POST, or PUT form data.
- * This field is only available after ParseForm is called.
- * The HTTP client ignores Form and uses Body instead.
- */
- form: url.Values
- /**
- * PostForm contains the parsed form data from PATCH, POST
- * or PUT body parameters.
+ * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns,
+ * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.
*
- * This field is only available after ParseForm is called.
- * The HTTP client ignores PostForm and uses Body instead.
- */
- postForm: url.Values
- /**
- * MultipartForm is the parsed multipart form, including file uploads.
- * This field is only available after ParseMultipartForm is called.
- * The HTTP client ignores MultipartForm and uses Body instead.
+ * If n <= 0, no idle connections are retained.
+ *
+ * The default max idle connections is currently 2. This may change in
+ * a future release.
*/
- multipartForm?: multipart.Form
+ setMaxIdleConns(n: number): void
+ }
+ interface DB {
/**
- * Trailer specifies additional headers that are sent after the request
- * body.
- *
- * For server requests, the Trailer map initially contains only the
- * trailer keys, with nil values. (The client declares which trailers it
- * will later send.) While the handler is reading from Body, it must
- * not reference Trailer. After reading from Body returns EOF, Trailer
- * can be read again and will contain non-nil values, if they were sent
- * by the client.
+ * SetMaxOpenConns sets the maximum number of open connections to the database.
*
- * For client requests, Trailer must be initialized to a map containing
- * the trailer keys to later send. The values may be nil or their final
- * values. The ContentLength must be 0 or -1, to send a chunked request.
- * After the HTTP request is sent the map values can be updated while
- * the request body is read. Once the body returns EOF, the caller must
- * not mutate Trailer.
+ * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than
+ * MaxIdleConns, then MaxIdleConns will be reduced to match the new
+ * MaxOpenConns limit.
*
- * Few HTTP clients, servers, or proxies support HTTP trailers.
- */
- trailer: Header
- /**
- * RemoteAddr allows HTTP servers and other software to record
- * the network address that sent the request, usually for
- * logging. This field is not filled in by ReadRequest and
- * has no defined format. The HTTP server in this package
- * sets RemoteAddr to an "IP:port" address before invoking a
- * handler.
- * This field is ignored by the HTTP client.
- */
- remoteAddr: string
- /**
- * RequestURI is the unmodified request-target of the
- * Request-Line (RFC 7230, Section 3.1.1) as sent by the client
- * to a server. Usually the URL field should be used instead.
- * It is an error to set this field in an HTTP client request.
+ * If n <= 0, then there is no limit on the number of open connections.
+ * The default is 0 (unlimited).
*/
- requestURI: string
+ setMaxOpenConns(n: number): void
+ }
+ interface DB {
/**
- * TLS allows HTTP servers and other software to record
- * information about the TLS connection on which the request
- * was received. This field is not filled in by ReadRequest.
- * The HTTP server in this package sets the field for
- * TLS-enabled connections before invoking a handler;
- * otherwise it leaves the field nil.
- * This field is ignored by the HTTP client.
+ * SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
+ *
+ * Expired connections may be closed lazily before reuse.
+ *
+ * If d <= 0, connections are not closed due to a connection's age.
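+ *
+ * For illustration, a typical pool configuration (the values are arbitrary
+ * examples, not recommendations):
+ *
+ * ```
+ * db.SetMaxOpenConns(25)
+ * db.SetMaxIdleConns(5)
+ * db.SetConnMaxLifetime(5 * time.Minute)
+ * ```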
*/
- tls?: any
+ setConnMaxLifetime(d: time.Duration): void
+ }
+ interface DB {
/**
- * Cancel is an optional channel whose closure indicates that the client
- * request should be regarded as canceled. Not all implementations of
- * RoundTripper may support Cancel.
+ * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle.
*
- * For server requests, this field is not applicable.
+ * Expired connections may be closed lazily before reuse.
*
- * Deprecated: Set the Request's context with NewRequestWithContext
- * instead. If a Request's Cancel field and context are both
- * set, it is undefined whether Cancel is respected.
+ * If d <= 0, connections are not closed due to a connection's idle time.
*/
- cancel: undefined
+ setConnMaxIdleTime(d: time.Duration): void
+ }
+ interface DB {
/**
- * Response is the redirect response which caused this request
- * to be created. This field is only populated during client
- * redirects.
+ * Stats returns database statistics.
*/
- response?: Response
+ stats(): DBStats
}
- interface Request {
+ interface DB {
/**
- * Context returns the request's context. To change the context, use
- * Clone or WithContext.
- *
- * The returned context is always non-nil; it defaults to the
- * background context.
- *
- * For outgoing client requests, the context controls cancellation.
+ * PrepareContext creates a prepared statement for later queries or executions.
+ * Multiple queries or executions may be run concurrently from the
+ * returned statement.
+ * The caller must call the statement's [*Stmt.Close] method
+ * when the statement is no longer needed.
*
- * For incoming server requests, the context is canceled when the
- * client's connection closes, the request is canceled (with HTTP/2),
- * or when the ServeHTTP method returns.
+ * The provided context is used for the preparation of the statement, not for the
+ * execution of the statement.
*/
- context(): context.Context
+ prepareContext(ctx: context.Context, query: string): (Stmt)
}
- interface Request {
+ interface DB {
/**
- * WithContext returns a shallow copy of r with its context changed
- * to ctx. The provided ctx must be non-nil.
- *
- * For outgoing client request, the context controls the entire
- * lifetime of a request and its response: obtaining a connection,
- * sending the request, and reading the response headers and body.
+ * Prepare creates a prepared statement for later queries or executions.
+ * Multiple queries or executions may be run concurrently from the
+ * returned statement.
+ * The caller must call the statement's [*Stmt.Close] method
+ * when the statement is no longer needed.
*
- * To create a new request with a context, use NewRequestWithContext.
- * To make a deep copy of a request with a new context, use Request.Clone.
+ * Prepare uses [context.Background] internally; to specify the context, use
+ * [DB.PrepareContext].
*/
- withContext(ctx: context.Context): (Request)
+ prepare(query: string): (Stmt)
}
- interface Request {
+ interface DB {
/**
- * Clone returns a deep copy of r with its context changed to ctx.
- * The provided ctx must be non-nil.
- *
- * For an outgoing client request, the context controls the entire
- * lifetime of a request and its response: obtaining a connection,
- * sending the request, and reading the response headers and body.
+ * ExecContext executes a query without returning any rows.
+ * The args are for any placeholder parameters in the query.
*/
- clone(ctx: context.Context): (Request)
+ execContext(ctx: context.Context, query: string, ...args: any[]): Result
}
- interface Request {
+ interface DB {
/**
- * ProtoAtLeast reports whether the HTTP protocol used
- * in the request is at least major.minor.
+ * Exec executes a query without returning any rows.
+ * The args are for any placeholder parameters in the query.
+ *
+ * Exec uses [context.Background] internally; to specify the context, use
+ * [DB.ExecContext].
*/
- protoAtLeast(major: number): boolean
+ exec(query: string, ...args: any[]): Result
}
- interface Request {
+ interface DB {
/**
- * UserAgent returns the client's User-Agent, if sent in the request.
+ * QueryContext executes a query that returns rows, typically a SELECT.
+ * The args are for any placeholder parameters in the query.
*/
- userAgent(): string
+ queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
}
- interface Request {
+ interface DB {
/**
- * Cookies parses and returns the HTTP cookies sent with the request.
+ * Query executes a query that returns rows, typically a SELECT.
+ * The args are for any placeholder parameters in the query.
+ *
+ * Query uses [context.Background] internally; to specify the context, use
+ * [DB.QueryContext].
*/
- cookies(): Array<(Cookie | undefined)>
+ query(query: string, ...args: any[]): (Rows)
}
- interface Request {
+ interface DB {
/**
- * Cookie returns the named cookie provided in the request or
- * ErrNoCookie if not found.
- * If multiple cookies match the given name, only one cookie will
- * be returned.
+ * QueryRowContext executes a query that is expected to return at most one row.
+ * QueryRowContext always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, [*Row.Scan] scans the first selected row and discards
+ * the rest.
*/
- cookie(name: string): (Cookie)
+ queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
}
- interface Request {
+ interface DB {
/**
- * AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
- * AddCookie does not attach more than one Cookie header field. That
- * means all cookies, if any, are written into the same line,
- * separated by semicolon.
- * AddCookie only sanitizes c's name and value, and does not sanitize
- * a Cookie header already present in the request.
+ * QueryRow executes a query that is expected to return at most one row.
+ * QueryRow always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, [*Row.Scan] scans the first selected row and discards
+ * the rest.
+ *
+ * QueryRow uses [context.Background] internally; to specify the context, use
+ * [DB.QueryRowContext].
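+ *
+ * Example (illustrative; the table and column names are placeholders):
+ *
+ * ```
+ * var name string
+ * err := db.QueryRow("SELECT name FROM users WHERE id = ?", id).Scan(&name)
+ * ```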
*/
- addCookie(c: Cookie): void
+ queryRow(query: string, ...args: any[]): (Row)
}
- interface Request {
+ interface DB {
/**
- * Referer returns the referring URL, if sent in the request.
+ * BeginTx starts a transaction.
*
- * Referer is misspelled as in the request itself, a mistake from the
- * earliest days of HTTP. This value can also be fetched from the
- * Header map as Header["Referer"]; the benefit of making it available
- * as a method is that the compiler can diagnose programs that use the
- * alternate (correct English) spelling req.Referrer() but cannot
- * diagnose programs that use Header["Referrer"].
+ * The provided context is used until the transaction is committed or rolled back.
+ * If the context is canceled, the sql package will roll back
+ * the transaction. [Tx.Commit] will return an error if the context provided to
+ * BeginTx is canceled.
+ *
+ * The provided [TxOptions] is optional and may be nil if defaults should be used.
+ * If a non-default isolation level is used that the driver doesn't support,
+ * an error will be returned.
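+ *
+ * A minimal sketch (assuming an open db and a ctx; the query and arguments
+ * are placeholders):
+ *
+ * ```
+ * tx, err := db.BeginTx(ctx, nil)
+ * ...
+ * defer tx.Rollback()
+ * _, err = tx.ExecContext(ctx, "UPDATE balance SET money = money + ? WHERE id = ?", 10, 1)
+ * ...
+ * err = tx.Commit()
+ * ```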
*/
- referer(): string
+ beginTx(ctx: context.Context, opts: TxOptions): (Tx)
}
- interface Request {
+ interface DB {
/**
- * MultipartReader returns a MIME multipart reader if this is a
- * multipart/form-data or a multipart/mixed POST request, else returns nil and an error.
- * Use this function instead of ParseMultipartForm to
- * process the request body as a stream.
+ * Begin starts a transaction. The default isolation level is dependent on
+ * the driver.
+ *
+ * Begin uses [context.Background] internally; to specify the context, use
+ * [DB.BeginTx].
*/
- multipartReader(): (multipart.Reader)
+ begin(): (Tx)
}
- interface Request {
+ interface DB {
/**
- * Write writes an HTTP/1.1 request, which is the header and body, in wire format.
- * This method consults the following fields of the request:
- *
- * ```
- * Host
- * URL
- * Method (defaults to "GET")
- * Header
- * ContentLength
- * TransferEncoding
- * Body
- * ```
+ * Driver returns the database's underlying driver.
+ */
+ driver(): any
+ }
+ interface DB {
+ /**
+ * Conn returns a single connection by either opening a new connection
+ * or returning an existing connection from the connection pool. Conn will
+ * block until either a connection is returned or ctx is canceled.
+ * Queries run on the same Conn will be run in the same database session.
*
- * If Body is present, Content-Length is <= 0 and TransferEncoding
- * hasn't been set to "identity", Write adds "Transfer-Encoding:
- * chunked" to the header. Body is closed after it is sent.
+ * Every Conn must be returned to the database pool after use by
+ * calling [Conn.Close].
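+ *
+ * A short sketch (assuming an open db and a ctx; the statement is a placeholder):
+ *
+ * ```
+ * conn, err := db.Conn(ctx)
+ * ...
+ * defer conn.Close()
+ * _, err = conn.ExecContext(ctx, "UPDATE users SET active = true WHERE id = ?", 1)
+ * ```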
*/
- write(w: io.Writer): void
+ conn(ctx: context.Context): (Conn)
}
- interface Request {
+ /**
+ * Tx is an in-progress database transaction.
+ *
+ * A transaction must end with a call to [Tx.Commit] or [Tx.Rollback].
+ *
+ * After a call to [Tx.Commit] or [Tx.Rollback], all operations on the
+ * transaction fail with [ErrTxDone].
+ *
+ * The statements prepared for a transaction by calling
+ * the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed
+ * by the call to [Tx.Commit] or [Tx.Rollback].
+ */
+ interface Tx {
+ }
+ interface Tx {
/**
- * WriteProxy is like Write but writes the request in the form
- * expected by an HTTP proxy. In particular, WriteProxy writes the
- * initial Request-URI line of the request with an absolute URI, per
- * section 5.3 of RFC 7230, including the scheme and host.
- * In either case, WriteProxy also writes a Host header, using
- * either r.Host or r.URL.Host.
+ * Commit commits the transaction.
*/
- writeProxy(w: io.Writer): void
+ commit(): void
}
- interface Request {
+ interface Tx {
/**
- * BasicAuth returns the username and password provided in the request's
- * Authorization header, if the request uses HTTP Basic Authentication.
- * See RFC 2617, Section 2.
+ * Rollback aborts the transaction.
*/
- basicAuth(): [string, boolean]
+ rollback(): void
}
- interface Request {
+ interface Tx {
/**
- * SetBasicAuth sets the request's Authorization header to use HTTP
- * Basic Authentication with the provided username and password.
+ * PrepareContext creates a prepared statement for use within a transaction.
*
- * With HTTP Basic Authentication the provided username and password
- * are not encrypted. It should generally only be used in an HTTPS
- * request.
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
*
- * The username may not contain a colon. Some protocols may impose
- * additional requirements on pre-escaping the username and
- * password. For instance, when used with OAuth2, both arguments must
- * be URL encoded first with url.QueryEscape.
+ * To use an existing prepared statement on this transaction, see [Tx.Stmt].
+ *
+ * The provided context will be used for the preparation of the statement, not
+ * for the execution of the returned statement. The returned statement
+ * will run in the transaction context.
*/
- setBasicAuth(username: string): void
+ prepareContext(ctx: context.Context, query: string): (Stmt)
}
- interface Request {
+ interface Tx {
/**
- * ParseForm populates r.Form and r.PostForm.
+ * Prepare creates a prepared statement for use within a transaction.
*
- * For all requests, ParseForm parses the raw query from the URL and updates
- * r.Form.
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
*
- * For POST, PUT, and PATCH requests, it also reads the request body, parses it
- * as a form and puts the results into both r.PostForm and r.Form. Request body
- * parameters take precedence over URL query string values in r.Form.
+ * To use an existing prepared statement on this transaction, see [Tx.Stmt].
*
- * If the request Body's size has not already been limited by MaxBytesReader,
- * the size is capped at 10MB.
+ * Prepare uses [context.Background] internally; to specify the context, use
+ * [Tx.PrepareContext].
+ */
+ prepare(query: string): (Stmt)
+ }
+ interface Tx {
+ /**
+ * StmtContext returns a transaction-specific prepared statement from
+ * an existing statement.
*
- * For other HTTP methods, or when the Content-Type is not
- * application/x-www-form-urlencoded, the request Body is not read, and
- * r.PostForm is initialized to a non-nil, empty value.
+ * Example:
*
- * ParseMultipartForm calls ParseForm automatically.
- * ParseForm is idempotent.
+ * ```
+ * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
+ * ...
+ * tx, err := db.Begin()
+ * ...
+ * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203)
+ * ```
+ *
+ * The provided context is used for the preparation of the statement, not for the
+ * execution of the statement.
+ *
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
*/
- parseForm(): void
+ stmtContext(ctx: context.Context, stmt: Stmt): (Stmt)
}
- interface Request {
+ interface Tx {
/**
- * ParseMultipartForm parses a request body as multipart/form-data.
- * The whole request body is parsed and up to a total of maxMemory bytes of
- * its file parts are stored in memory, with the remainder stored on
- * disk in temporary files.
- * ParseMultipartForm calls ParseForm if necessary.
- * If ParseForm returns an error, ParseMultipartForm returns it but also
- * continues parsing the request body.
- * After one call to ParseMultipartForm, subsequent calls have no effect.
+ * Stmt returns a transaction-specific prepared statement from
+ * an existing statement.
+ *
+ * Example:
+ *
+ * ```
+ * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
+ * ...
+ * tx, err := db.Begin()
+ * ...
+ * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
+ * ```
+ *
+ * The returned statement operates within the transaction and will be closed
+ * when the transaction has been committed or rolled back.
+ *
+ * Stmt uses [context.Background] internally; to specify the context, use
+ * [Tx.StmtContext].
*/
- parseMultipartForm(maxMemory: number): void
+ stmt(stmt: Stmt): (Stmt)
}
- interface Request {
+ interface Tx {
/**
- * FormValue returns the first value for the named component of the query.
- * POST and PUT body parameters take precedence over URL query string values.
- * FormValue calls ParseMultipartForm and ParseForm if necessary and ignores
- * any errors returned by these functions.
- * If key is not present, FormValue returns the empty string.
- * To access multiple values of the same key, call ParseForm and
- * then inspect Request.Form directly.
+ * ExecContext executes a query that doesn't return rows.
+ * For example: an INSERT or UPDATE.
*/
- formValue(key: string): string
+ execContext(ctx: context.Context, query: string, ...args: any[]): Result
}
- interface Request {
+ interface Tx {
/**
- * PostFormValue returns the first value for the named component of the POST,
- * PATCH, or PUT request body. URL query parameters are ignored.
- * PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores
- * any errors returned by these functions.
- * If key is not present, PostFormValue returns the empty string.
+ * Exec executes a query that doesn't return rows.
+ * For example: an INSERT or UPDATE.
+ *
+ * Exec uses [context.Background] internally; to specify the context, use
+ * [Tx.ExecContext].
*/
- postFormValue(key: string): string
+ exec(query: string, ...args: any[]): Result
}
- interface Request {
+ interface Tx {
/**
- * FormFile returns the first file for the provided form key.
- * FormFile calls ParseMultipartForm and ParseForm if necessary.
+ * QueryContext executes a query that returns rows, typically a SELECT.
*/
- formFile(key: string): [multipart.File, (multipart.FileHeader)]
+ queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
}
- /**
- * A ResponseWriter interface is used by an HTTP handler to
- * construct an HTTP response.
- *
- * A ResponseWriter may not be used after the Handler.ServeHTTP method
- * has returned.
- */
- interface ResponseWriter {
- [key:string]: any;
+ interface Tx {
/**
- * Header returns the header map that will be sent by
- * WriteHeader. The Header map also is the mechanism with which
- * Handlers can set HTTP trailers.
- *
- * Changing the header map after a call to WriteHeader (or
- * Write) has no effect unless the HTTP status code was of the
- * 1xx class or the modified headers are trailers.
- *
- * There are two ways to set Trailers. The preferred way is to
- * predeclare in the headers which trailers you will later
- * send by setting the "Trailer" header to the names of the
- * trailer keys which will come later. In this case, those
- * keys of the Header map are treated as if they were
- * trailers. See the example. The second way, for trailer
- * keys not known to the Handler until after the first Write,
- * is to prefix the Header map keys with the TrailerPrefix
- * constant value. See TrailerPrefix.
+ * Query executes a query that returns rows, typically a SELECT.
*
- * To suppress automatic response headers (such as "Date"), set
- * their value to nil.
+ * Query uses [context.Background] internally; to specify the context, use
+ * [Tx.QueryContext].
*/
- header(): Header
+ query(query: string, ...args: any[]): (Rows)
+ }
+ interface Tx {
/**
- * Write writes the data to the connection as part of an HTTP reply.
- *
- * If WriteHeader has not yet been called, Write calls
- * WriteHeader(http.StatusOK) before writing the data. If the Header
- * does not contain a Content-Type line, Write adds a Content-Type set
- * to the result of passing the initial 512 bytes of written data to
- * DetectContentType. Additionally, if the total size of all written
- * data is under a few KB and there are no Flush calls, the
- * Content-Length header is added automatically.
- *
- * Depending on the HTTP protocol version and the client, calling
- * Write or WriteHeader may prevent future reads on the
- * Request.Body. For HTTP/1.x requests, handlers should read any
- * needed request body data before writing the response. Once the
- * headers have been flushed (due to either an explicit Flusher.Flush
- * call or writing enough data to trigger a flush), the request body
- * may be unavailable. For HTTP/2 requests, the Go HTTP server permits
- * handlers to continue to read the request body while concurrently
- * writing the response. However, such behavior may not be supported
- * by all HTTP/2 clients. Handlers should read before writing if
- * possible to maximize compatibility.
+ * QueryRowContext executes a query that is expected to return at most one row.
+ * QueryRowContext always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
*/
- write(_arg0: string|Array): number
+ queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
+ }
+ interface Tx {
/**
- * WriteHeader sends an HTTP response header with the provided
- * status code.
- *
- * If WriteHeader is not called explicitly, the first call to Write
- * will trigger an implicit WriteHeader(http.StatusOK).
- * Thus explicit calls to WriteHeader are mainly used to
- * send error codes or 1xx informational responses.
- *
- * The provided code must be a valid HTTP 1xx-5xx status code.
- * Any number of 1xx headers may be written, followed by at most
- * one 2xx-5xx header. 1xx headers are sent immediately, but 2xx-5xx
- * headers may be buffered. Use the Flusher interface to send
- * buffered data. The header map is cleared when 2xx-5xx headers are
- * sent, but not with 1xx headers.
+ * QueryRow executes a query that is expected to return at most one row.
+ * QueryRow always returns a non-nil value. Errors are deferred until
+ * [Row]'s Scan method is called.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
*
- * The server will automatically send a 100 (Continue) header
- * on the first read from the request body if the request has
- * an "Expect: 100-continue" header.
+ * QueryRow uses [context.Background] internally; to specify the context, use
+ * [Tx.QueryRowContext].
*/
- writeHeader(statusCode: number): void
+ queryRow(query: string, ...args: any[]): (Row)
}
/**
- * A Server defines parameters for running an HTTP server.
- * The zero value for Server is a valid configuration.
+ * Stmt is a prepared statement.
+ * A Stmt is safe for concurrent use by multiple goroutines.
+ *
+ * If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single
+ * underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will
+ * become unusable and all operations will return an error.
+ * If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the
+ * [DB]. When the Stmt needs to execute on a new underlying connection, it will
+ * prepare itself on the new connection automatically.
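+ *
+ * An illustrative use (the query and arguments are placeholders):
+ *
+ * ```
+ * stmt, err := db.Prepare("SELECT name FROM users WHERE id = ?")
+ * ...
+ * defer stmt.Close()
+ * row := stmt.QueryRow(42)
+ * ```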
*/
- interface Server {
+ interface Stmt {
+ }
+ interface Stmt {
/**
- * Addr optionally specifies the TCP address for the server to listen on,
- * in the form "host:port". If empty, ":http" (port 80) is used.
- * The service names are defined in RFC 6335 and assigned by IANA.
- * See net.Dial for details of the address format.
+ * ExecContext executes a prepared statement with the given arguments and
+ * returns a [Result] summarizing the effect of the statement.
*/
- addr: string
- handler: Handler // handler to invoke, http.DefaultServeMux if nil
- /**
- * DisableGeneralOptionsHandler, if true, passes "OPTIONS *" requests to the Handler,
- * otherwise responds with 200 OK and Content-Length: 0.
- */
- disableGeneralOptionsHandler: boolean
- /**
- * TLSConfig optionally provides a TLS configuration for use
- * by ServeTLS and ListenAndServeTLS. Note that this value is
- * cloned by ServeTLS and ListenAndServeTLS, so it's not
- * possible to modify the configuration with methods like
- * tls.Config.SetSessionTicketKeys. To use
- * SetSessionTicketKeys, use Server.Serve with a TLS Listener
- * instead.
- */
- tlsConfig?: any
+ execContext(ctx: context.Context, ...args: any[]): Result
+ }
+ interface Stmt {
/**
- * ReadTimeout is the maximum duration for reading the entire
- * request, including the body. A zero or negative value means
- * there will be no timeout.
+ * Exec executes a prepared statement with the given arguments and
+ * returns a [Result] summarizing the effect of the statement.
*
- * Because ReadTimeout does not let Handlers make per-request
- * decisions on each request body's acceptable deadline or
- * upload rate, most users will prefer to use
- * ReadHeaderTimeout. It is valid to use them both.
- */
- readTimeout: time.Duration
- /**
- * ReadHeaderTimeout is the amount of time allowed to read
- * request headers. The connection's read deadline is reset
- * after reading the headers and the Handler can decide what
- * is considered too slow for the body. If ReadHeaderTimeout
- * is zero, the value of ReadTimeout is used. If both are
- * zero, there is no timeout.
- */
- readHeaderTimeout: time.Duration
- /**
- * WriteTimeout is the maximum duration before timing out
- * writes of the response. It is reset whenever a new
- * request's header is read. Like ReadTimeout, it does not
- * let Handlers make decisions on a per-request basis.
- * A zero or negative value means there will be no timeout.
- */
- writeTimeout: time.Duration
- /**
- * IdleTimeout is the maximum amount of time to wait for the
- * next request when keep-alives are enabled. If IdleTimeout
- * is zero, the value of ReadTimeout is used. If both are
- * zero, there is no timeout.
- */
- idleTimeout: time.Duration
- /**
- * MaxHeaderBytes controls the maximum number of bytes the
- * server will read parsing the request header's keys and
- * values, including the request line. It does not limit the
- * size of the request body.
- * If zero, DefaultMaxHeaderBytes is used.
- */
- maxHeaderBytes: number
- /**
- * TLSNextProto optionally specifies a function to take over
- * ownership of the provided TLS connection when an ALPN
- * protocol upgrade has occurred. The map key is the protocol
- * name negotiated. The Handler argument should be used to
- * handle HTTP requests and will initialize the Request's TLS
- * and RemoteAddr if not already set. The connection is
- * automatically closed when the function returns.
- * If TLSNextProto is not nil, HTTP/2 support is not enabled
- * automatically.
- */
- tlsNextProto: _TygojaDict
- /**
- * ConnState specifies an optional callback function that is
- * called when a client connection changes state. See the
- * ConnState type and associated constants for details.
- */
- connState: (_arg0: net.Conn, _arg1: ConnState) => void
- /**
- * ErrorLog specifies an optional logger for errors accepting
- * connections, unexpected behavior from handlers, and
- * underlying FileSystem errors.
- * If nil, logging is done via the log package's standard logger.
+ * Exec uses [context.Background] internally; to specify the context, use
+ * [Stmt.ExecContext].
*/
- errorLog?: any
+ exec(...args: any[]): Result
+ }
+ interface Stmt {
/**
- * BaseContext optionally specifies a function that returns
- * the base context for incoming requests on this server.
- * The provided Listener is the specific Listener that's
- * about to start accepting requests.
- * If BaseContext is nil, the default is context.Background().
- * If non-nil, it must return a non-nil context.
+ * QueryContext executes a prepared query statement with the given arguments
+ * and returns the query results as a [*Rows].
*/
- baseContext: (_arg0: net.Listener) => context.Context
+ queryContext(ctx: context.Context, ...args: any[]): (Rows)
+ }
+ interface Stmt {
/**
- * ConnContext optionally specifies a function that modifies
- * the context used for a new connection c. The provided ctx
- * is derived from the base context and has a ServerContextKey
- * value.
+ * Query executes a prepared query statement with the given arguments
+ * and returns the query results as a *Rows.
+ *
+ * Query uses [context.Background] internally; to specify the context, use
+ * [Stmt.QueryContext].
*/
- connContext: (ctx: context.Context, c: net.Conn) => context.Context
+ query(...args: any[]): (Rows)
}
- interface Server {
+ interface Stmt {
/**
- * Close immediately closes all active net.Listeners and any
- * connections in state StateNew, StateActive, or StateIdle. For a
- * graceful shutdown, use Shutdown.
- *
- * Close does not attempt to close (and does not even know about)
- * any hijacked connections, such as WebSockets.
- *
- * Close returns any error returned from closing the Server's
- * underlying Listener(s).
+ * QueryRowContext executes a prepared query statement with the given arguments.
+ * If an error occurs during the execution of the statement, that error will
+ * be returned by a call to Scan on the returned [*Row], which is always non-nil.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
*/
- close(): void
+ queryRowContext(ctx: context.Context, ...args: any[]): (Row)
}
- interface Server {
+ interface Stmt {
/**
- * Shutdown gracefully shuts down the server without interrupting any
- * active connections. Shutdown works by first closing all open
- * listeners, then closing all idle connections, and then waiting
- * indefinitely for connections to return to idle and then shut down.
- * If the provided context expires before the shutdown is complete,
- * Shutdown returns the context's error, otherwise it returns any
- * error returned from closing the Server's underlying Listener(s).
+ * QueryRow executes a prepared query statement with the given arguments.
+ * If an error occurs during the execution of the statement, that error will
+ * be returned by a call to Scan on the returned [*Row], which is always non-nil.
+ * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+ * Otherwise, the [*Row.Scan] scans the first selected row and discards
+ * the rest.
*
- * When Shutdown is called, Serve, ListenAndServe, and
- * ListenAndServeTLS immediately return ErrServerClosed. Make sure the
- * program doesn't exit and waits instead for Shutdown to return.
+ * Example usage:
*
- * Shutdown does not attempt to close nor wait for hijacked
- * connections such as WebSockets. The caller of Shutdown should
- * separately notify such long-lived connections of shutdown and wait
- * for them to close, if desired. See RegisterOnShutdown for a way to
- * register shutdown notification functions.
+ * ```
+ * var name string
+ * err := nameByUseridStmt.QueryRow(id).Scan(&name)
+ * ```
*
- * Once Shutdown has been called on a server, it may not be reused;
- * future calls to methods such as Serve will return ErrServerClosed.
+ * QueryRow uses [context.Background] internally; to specify the context, use
+ * [Stmt.QueryRowContext].
*/
- shutdown(ctx: context.Context): void
+ queryRow(...args: any[]): (Row)
}
- interface Server {
+ interface Stmt {
/**
- * RegisterOnShutdown registers a function to call on Shutdown.
- * This can be used to gracefully shutdown connections that have
- * undergone ALPN protocol upgrade or that have been hijacked.
- * This function should start protocol-specific graceful shutdown,
- * but should not wait for shutdown to complete.
+ * Close closes the statement.
*/
- registerOnShutdown(f: () => void): void
+ close(): void
}
- interface Server {
+ /**
+ * Rows is the result of a query. Its cursor starts before the first row
+ * of the result set. Use [Rows.Next] to advance from row to row.
+ */
+ interface Rows {
+ }
+ interface Rows {
/**
- * ListenAndServe listens on the TCP network address srv.Addr and then
- * calls Serve to handle requests on incoming connections.
- * Accepted connections are configured to enable TCP keep-alives.
- *
- * If srv.Addr is blank, ":http" is used.
+ * Next prepares the next result row for reading with the [Rows.Scan] method. It
+ * returns true on success, or false if there is no next result row or an error
+ * happened while preparing it. [Rows.Err] should be consulted to distinguish between
+ * the two cases.
*
- * ListenAndServe always returns a non-nil error. After Shutdown or Close,
- * the returned error is ErrServerClosed.
+ * Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next].
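+ *
+ * The typical iteration pattern (the query and variables are illustrative):
+ *
+ * ```
+ * rows, err := db.Query("SELECT id, name FROM users")
+ * ...
+ * defer rows.Close()
+ * for rows.Next() {
+ * 	var id int64
+ * 	var name string
+ * 	err = rows.Scan(&id, &name)
+ * 	...
+ * }
+ * err = rows.Err()
+ * ```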
*/
- listenAndServe(): void
+ next(): boolean
}
- interface Server {
+ interface Rows {
/**
- * Serve accepts incoming connections on the Listener l, creating a
- * new service goroutine for each. The service goroutines read requests and
- * then call srv.Handler to reply to them.
- *
- * HTTP/2 support is only enabled if the Listener returns *tls.Conn
- * connections and they were configured with "h2" in the TLS
- * Config.NextProtos.
+ * NextResultSet prepares the next result set for reading. It reports whether
+ * there are further result sets, or false if there is no further result set
+ * or if there is an error advancing to it. The [Rows.Err] method should be consulted
+ * to distinguish between the two cases.
*
- * Serve always returns a non-nil error and closes l.
- * After Shutdown or Close, the returned error is ErrServerClosed.
+ * After calling NextResultSet, the [Rows.Next] method should always be called before
+ * scanning. If there are further result sets they may not have rows in the result
+ * set.
*/
- serve(l: net.Listener): void
+ nextResultSet(): boolean
}
- interface Server {
+ interface Rows {
/**
- * ServeTLS accepts incoming connections on the Listener l, creating a
- * new service goroutine for each. The service goroutines perform TLS
- * setup and then read requests, calling srv.Handler to reply to them.
- *
- * Files containing a certificate and matching private key for the
- * server must be provided if neither the Server's
- * TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
- * If the certificate is signed by a certificate authority, the
- * certFile should be the concatenation of the server's certificate,
- * any intermediates, and the CA's certificate.
- *
- * ServeTLS always returns a non-nil error. After Shutdown or Close, the
- * returned error is ErrServerClosed.
+ * Err returns the error, if any, that was encountered during iteration.
+ * Err may be called after an explicit or implicit [Rows.Close].
*/
- serveTLS(l: net.Listener, certFile: string): void
+ err(): void
}
- interface Server {
+ interface Rows {
/**
- * SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
- * By default, keep-alives are always enabled. Only very
- * resource-constrained environments or servers in the process of
- * shutting down should disable them.
+ * Columns returns the column names.
+ * Columns returns an error if the rows are closed.
*/
- setKeepAlivesEnabled(v: boolean): void
+ columns(): Array
}
- interface Server {
+ interface Rows {
/**
- * ListenAndServeTLS listens on the TCP network address srv.Addr and
- * then calls ServeTLS to handle requests on incoming TLS connections.
- * Accepted connections are configured to enable TCP keep-alives.
+ * ColumnTypes returns column information such as column type, length,
+ * and nullable. Some information may not be available from some drivers.
+ */
+ columnTypes(): Array<(ColumnType | undefined)>
+ }
+ interface Rows {
+ /**
+ * Scan copies the columns in the current row into the values pointed
+ * at by dest. The number of values in dest must be the same as the
+ * number of columns in [Rows].
*
- * Filenames containing a certificate and matching private key for the
- * server must be provided if neither the Server's TLSConfig.Certificates
- * nor TLSConfig.GetCertificate are populated. If the certificate is
- * signed by a certificate authority, the certFile should be the
- * concatenation of the server's certificate, any intermediates, and
- * the CA's certificate.
+ * Scan converts columns read from the database into the following
+ * common Go types and special types provided by the sql package:
*
- * If srv.Addr is blank, ":https" is used.
+ * ```
+ * *string
+ * *[]byte
+ * *int, *int8, *int16, *int32, *int64
+ * *uint, *uint8, *uint16, *uint32, *uint64
+ * *bool
+ * *float32, *float64
+ * *interface{}
+ * *RawBytes
+ * *Rows (cursor value)
+ * any type implementing Scanner (see Scanner docs)
+ * ```
*
- * ListenAndServeTLS always returns a non-nil error. After Shutdown or
- * Close, the returned error is ErrServerClosed.
- */
- listenAndServeTLS(certFile: string): void
- }
-}
-
-/**
- * Package blob provides an easy and portable way to interact with blobs
- * within a storage location. Subpackages contain driver implementations of
- * blob for supported services.
- *
- * See https://gocloud.dev/howto/blob/ for a detailed how-to guide.
- *
- * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with
- * functions in that package.
- *
- * # Errors
- *
- * The errors returned from this package can be inspected in several ways:
- *
- * The Code function from gocloud.dev/gcerrors will return an error code, also
- * defined in that package, when invoked on an error.
- *
- * The Bucket.ErrorAs method can retrieve the driver error underlying the returned
- * error.
- *
- * # OpenCensus Integration
- *
- * OpenCensus supports tracing and metric collection for multiple languages and
- * backend providers. See https://opencensus.io.
- *
- * This API collects OpenCensus traces and metrics for the following methods:
- * ```
- * - Attributes
- * - Copy
- * - Delete
- * - ListPage
- * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll
- * are included because they call NewRangeReader.)
- * - NewWriter, from creation until the call to Close.
- * ```
- *
- * All trace and metric names begin with the package import path.
- * The traces add the method name.
- * For example, "gocloud.dev/blob/Attributes".
- * The metrics are "completed_calls", a count of completed method calls by driver,
- * method and status (error code); and "latency", a distribution of method latency
- * by driver and method.
- * For example, "gocloud.dev/blob/latency".
- *
- * It also collects the following metrics:
- * ```
- * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver.
- * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver.
- * ```
- *
- * To enable trace collection in your application, see "Configure Exporter" at
- * https://opencensus.io/quickstart/go/tracing.
- * To enable metric collection in your application, see "Exporting stats" at
- * https://opencensus.io/quickstart/go/metrics.
- */
-namespace blob {
- /**
- * Reader reads bytes from a blob.
- * It implements io.ReadSeekCloser, and must be closed after
- * reads are finished.
- */
- interface Reader {
- }
- interface Reader {
- /**
- * Read implements io.Reader (https://golang.org/pkg/io/#Reader).
- */
- read(p: string|Array): number
- }
- interface Reader {
- /**
- * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker).
+ * In the most simple case, if the type of the value from the source
+ * column is an integer, bool or string type T and dest is of type *T,
+ * Scan simply assigns the value through the pointer.
+ *
+ * Scan also converts between string and numeric types, as long as no
+ * information would be lost. While Scan stringifies all numbers
+ * scanned from numeric database columns into *string, scans into
+ * numeric types are checked for overflow. For example, a float64 with
+ * value 300 or a string with value "300" can scan into a uint16, but
+ * not into a uint8, though float64(255) or "255" can scan into a
+ * uint8. One exception is that scans of some float64 numbers to
+ * strings may lose information when stringifying. In general, scan
+ * floating point columns into *float64.
+ *
+ * If a dest argument has type *[]byte, Scan saves in that argument a
+ * copy of the corresponding data. The copy is owned by the caller and
+ * can be modified and held indefinitely. The copy can be avoided by
+ * using an argument of type [*RawBytes] instead; see the documentation
+ * for [RawBytes] for restrictions on its use.
+ *
+ * If an argument has type *interface{}, Scan copies the value
+ * provided by the underlying driver without conversion. When scanning
+ * from a source value of type []byte to *interface{}, a copy of the
+ * slice is made and the caller owns the result.
+ *
+ * Source values of type [time.Time] may be scanned into values of type
+ * *time.Time, *interface{}, *string, or *[]byte. When converting to
+ * the latter two, [time.RFC3339Nano] is used.
+ *
+ * Source values of type bool may be scanned into types *bool,
+ * *interface{}, *string, *[]byte, or [*RawBytes].
+ *
+ * For scanning into *bool, the source may be true, false, 1, 0, or
+ * string inputs parseable by [strconv.ParseBool].
+ *
+ * Scan can also convert a cursor returned from a query, such as
+ * "select cursor(select * from my_table) from dual", into a
+ * [*Rows] value that can itself be scanned from. The parent
+ * select query will close any cursor [*Rows] if the parent [*Rows] is closed.
+ *
+ * If any of the first arguments implementing [Scanner] returns an error,
+ * that error will be wrapped in the returned error.
*/
- seek(offset: number, whence: number): number
+ scan(...dest: any[]): void
}
- interface Reader {
+ interface Rows {
/**
- * Close implements io.Closer (https://golang.org/pkg/io/#Closer).
+ * Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called
+ * and returns false and there are no further result sets,
+ * the [Rows] are closed automatically and it will suffice to check the
+ * result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err].
*/
close(): void
}
- interface Reader {
- /**
- * ContentType returns the MIME type of the blob.
- */
- contentType(): string
- }
- interface Reader {
- /**
- * ModTime returns the time the blob was last modified.
- */
- modTime(): time.Time
- }
- interface Reader {
- /**
- * Size returns the size of the blob content in bytes.
- */
- size(): number
- }
- interface Reader {
+ /**
+ * A Result summarizes an executed SQL command.
+ */
+ interface Result {
+ [key:string]: any;
/**
- * As converts i to driver-specific types.
- * See https://gocloud.dev/concepts/as/ for background information, the "As"
- * examples in this package for examples, and the driver package
- * documentation for the specific types supported for that driver.
+ * LastInsertId returns the integer generated by the database
+ * in response to a command. Typically this will be from an
+ * "auto increment" column when inserting a new row. Not all
+ * databases support this feature, and the syntax of such
+ * statements varies.
*/
- as(i: {
- }): boolean
- }
- interface Reader {
+ lastInsertId(): number
/**
- * WriteTo reads from r and writes to w until there's no more data or
- * an error occurs.
- * The return value is the number of bytes written to w.
- *
- * It implements the io.WriterTo interface.
+ * RowsAffected returns the number of rows affected by an
+ * update, insert, or delete. Not every database or database
+ * driver may support this.
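+ *
+ * Example (illustrative; the query is a placeholder):
+ *
+ * ```
+ * res, err := db.Exec("DELETE FROM sessions WHERE expired = 1")
+ * ...
+ * n, err := res.RowsAffected()
+ * ```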
*/
- writeTo(w: io.Writer): number
+ rowsAffected(): number
}
+}
+
+/**
+ * Package multipart implements MIME multipart parsing, as defined in RFC
+ * 2046.
+ *
+ * The implementation is sufficient for HTTP (RFC 2388) and the multipart
+ * bodies generated by popular browsers.
+ *
+ * # Limits
+ *
+ * To protect against malicious inputs, this package sets limits on the size
+ * of the MIME data it processes.
+ *
+ * Reader.NextPart and Reader.NextRawPart limit the number of headers in a
+ * part to 10000 and Reader.ReadForm limits the total number of headers in all
+ * FileHeaders to 10000.
+ * These limits may be adjusted with the GODEBUG=multipartmaxheaders=
+ * setting.
+ *
+ * Reader.ReadForm further limits the number of parts in a form to 1000.
+ * This limit may be adjusted with the GODEBUG=multipartmaxparts=
+ * setting.
+ */
+/**
+ * Copyright 2023 The Go Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file.
+ */
+namespace multipart {
/**
- * Attributes contains attributes about a blob.
+ * A FileHeader describes a file part of a multipart request.
*/
- interface Attributes {
- /**
- * CacheControl specifies caching attributes that services may use
- * when serving the blob.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
- */
- cacheControl: string
- /**
- * ContentDisposition specifies whether the blob content is expected to be
- * displayed inline or as an attachment.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
- */
- contentDisposition: string
- /**
- * ContentEncoding specifies the encoding used for the blob's content, if any.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
- */
- contentEncoding: string
- /**
- * ContentLanguage specifies the language used in the blob's content, if any.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
- */
- contentLanguage: string
- /**
- * ContentType is the MIME type of the blob. It will not be empty.
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
- */
- contentType: string
- /**
- * Metadata holds key/value pairs associated with the blob.
- * Keys are guaranteed to be in lowercase, even if the backend service
- * has case-sensitive keys (although note that Metadata written via
- * this package will always be lowercased). If there are duplicate
- * case-insensitive keys (e.g., "foo" and "FOO"), only one value
- * will be kept, and it is undefined which one.
- */
- metadata: _TygojaDict
- /**
- * CreateTime is the time the blob was created, if available. If not available,
- * CreateTime will be the zero time.
- */
- createTime: time.Time
- /**
- * ModTime is the time the blob was last modified.
- */
- modTime: time.Time
- /**
- * Size is the size of the blob's content in bytes.
- */
+ interface FileHeader {
+ filename: string
+ header: textproto.MIMEHeader
size: number
- /**
- * MD5 is an MD5 hash of the blob contents or nil if not available.
- */
- md5: string|Array
- /**
- * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag.
- */
- eTag: string
}
- interface Attributes {
+ interface FileHeader {
/**
- * As converts i to driver-specific types.
- * See https://gocloud.dev/concepts/as/ for background information, the "As"
- * examples in this package for examples, and the driver package
- * documentation for the specific types supported for that driver.
+ * Open opens and returns the FileHeader's associated File.
*/
- as(i: {
- }): boolean
- }
- /**
- * ListObject represents a single blob returned from List.
- */
- interface ListObject {
- /**
- * Key is the key for this blob.
- */
- key: string
- /**
- * ModTime is the time the blob was last modified.
- */
- modTime: time.Time
- /**
- * Size is the size of the blob's content in bytes.
- */
- size: number
- /**
- * MD5 is an MD5 hash of the blob contents or nil if not available.
- */
- md5: string|Array
- /**
- * IsDir indicates that this result represents a "directory" in the
- * hierarchical namespace, ending in ListOptions.Delimiter. Key can be
- * passed as ListOptions.Prefix to list items in the "directory".
- * Fields other than Key and IsDir will not be set if IsDir is true.
- */
- isDir: boolean
- }
- interface ListObject {
- /**
- * As converts i to driver-specific types.
- * See https://gocloud.dev/concepts/as/ for background information, the "As"
- * examples in this package for examples, and the driver package
- * documentation for the specific types supported for that driver.
- */
- as(i: {
- }): boolean
+ open(): File
}
}
/**
- * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+ * Package http provides HTTP client and server implementations.
*
- * See README.md for more info.
+ * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests:
+ *
+ * ```
+ * resp, err := http.Get("http://example.com/")
+ * ...
+ * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
+ * ...
+ * resp, err := http.PostForm("http://example.com/form",
+ * url.Values{"key": {"Value"}, "id": {"123"}})
+ * ```
+ *
+ * The caller must close the response body when finished with it:
+ *
+ * ```
+ * resp, err := http.Get("http://example.com/")
+ * if err != nil {
+ * // handle error
+ * }
+ * defer resp.Body.Close()
+ * body, err := io.ReadAll(resp.Body)
+ * // ...
+ * ```
+ *
+ * # Clients and Transports
+ *
+ * For control over HTTP client headers, redirect policy, and other
+ * settings, create a [Client]:
+ *
+ * ```
+ * client := &http.Client{
+ * CheckRedirect: redirectPolicyFunc,
+ * }
+ *
+ * resp, err := client.Get("http://example.com")
+ * // ...
+ *
+ * req, err := http.NewRequest("GET", "http://example.com", nil)
+ * // ...
+ * req.Header.Add("If-None-Match", `W/"wyzzy"`)
+ * resp, err := client.Do(req)
+ * // ...
+ * ```
+ *
+ * For control over proxies, TLS configuration, keep-alives,
+ * compression, and other settings, create a [Transport]:
+ *
+ * ```
+ * tr := &http.Transport{
+ * MaxIdleConns: 10,
+ * IdleConnTimeout: 30 * time.Second,
+ * DisableCompression: true,
+ * }
+ * client := &http.Client{Transport: tr}
+ * resp, err := client.Get("https://example.com")
+ * ```
+ *
+ * Clients and Transports are safe for concurrent use by multiple
+ * goroutines and for efficiency should only be created once and re-used.
+ *
+ * # Servers
+ *
+ * ListenAndServe starts an HTTP server with a given address and handler.
+ * The handler is usually nil, which means to use [DefaultServeMux].
+ * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]:
+ *
+ * ```
+ * http.Handle("/foo", fooHandler)
+ *
+ * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
+ * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
+ * })
+ *
+ * log.Fatal(http.ListenAndServe(":8080", nil))
+ * ```
+ *
+ * More control over the server's behavior is available by creating a
+ * custom Server:
+ *
+ * ```
+ * s := &http.Server{
+ * Addr: ":8080",
+ * Handler: myHandler,
+ * ReadTimeout: 10 * time.Second,
+ * WriteTimeout: 10 * time.Second,
+ * MaxHeaderBytes: 1 << 20,
+ * }
+ * log.Fatal(s.ListenAndServe())
+ * ```
+ *
+ * # HTTP/2
+ *
+ * Starting with Go 1.6, the http package has transparent support for the
+ * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
+ * can do so by setting [Transport.TLSNextProto] (for clients) or
+ * [Server.TLSNextProto] (for servers) to a non-nil, empty
+ * map. Alternatively, the following GODEBUG settings are
+ * currently supported:
+ *
+ * ```
+ * GODEBUG=http2client=0 # disable HTTP/2 client support
+ * GODEBUG=http2server=0 # disable HTTP/2 server support
+ * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs
+ * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps
+ * ```
+ *
+ * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug
+ *
+ * The http package's [Transport] and [Server] both automatically enable
+ * HTTP/2 support for simple configurations. To enable HTTP/2 for more
+ * complex configurations, to use lower-level HTTP/2 features, or to use
+ * a newer version of Go's http2 package, import "golang.org/x/net/http2"
+ * directly and use its ConfigureTransport and/or ConfigureServer
+ * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2
+ * package takes precedence over the net/http package's built-in HTTP/2
+ * support.
*/
-namespace jwt {
+namespace http {
+ // @ts-ignore
+ import mathrand = rand
+ // @ts-ignore
+ import urlpkg = url
/**
- * MapClaims is a claims type that uses the map[string]interface{} for JSON decoding.
- * This is the default claims type if you don't supply one
+ * A Request represents an HTTP request received by a server
+ * or to be sent by a client.
+ *
+ * The field semantics differ slightly between client and server
+ * usage. In addition to the notes on the fields below, see the
+ * documentation for [Request.Write] and [RoundTripper].
*/
- interface MapClaims extends _TygojaDict{}
- interface MapClaims {
+ interface Request {
/**
- * VerifyAudience Compares the aud claim against cmp.
- * If required is false, this method will return true if the value matches or is unset
+ * Method specifies the HTTP method (GET, POST, PUT, etc.).
+ * For client requests, an empty string means GET.
*/
- verifyAudience(cmp: string, req: boolean): boolean
- }
- interface MapClaims {
+ method: string
/**
- * VerifyExpiresAt compares the exp claim against cmp (cmp <= exp).
- * If req is false, it will return true, if exp is unset.
+ * URL specifies either the URI being requested (for server
+ * requests) or the URL to access (for client requests).
+ *
+ * For server requests, the URL is parsed from the URI
+ * supplied on the Request-Line as stored in RequestURI. For
+ * most requests, fields other than Path and RawQuery will be
+ * empty. (See RFC 7230, Section 5.3)
+ *
+ * For client requests, the URL's Host specifies the server to
+ * connect to, while the Request's Host field optionally
+ * specifies the Host header value to send in the HTTP
+ * request.
*/
- verifyExpiresAt(cmp: number, req: boolean): boolean
- }
- interface MapClaims {
+ url?: url.URL
/**
- * VerifyIssuedAt compares the exp claim against cmp (cmp >= iat).
- * If req is false, it will return true, if iat is unset.
+ * The protocol version for incoming server requests.
+ *
+ * For client requests, these fields are ignored. The HTTP
+ * client code always uses either HTTP/1.1 or HTTP/2.
+ * See the docs on Transport for details.
*/
- verifyIssuedAt(cmp: number, req: boolean): boolean
- }
- interface MapClaims {
+ proto: string // "HTTP/1.0"
+ protoMajor: number // 1
+ protoMinor: number // 0
/**
- * VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
- * If req is false, it will return true, if nbf is unset.
+ * Header contains the request header fields either received
+ * by the server or to be sent by the client.
+ *
+ * If a server received a request with header lines,
+ *
+ * ```
+ * Host: example.com
+ * accept-encoding: gzip, deflate
+ * Accept-Language: en-us
+ * fOO: Bar
+ * foo: two
+ * ```
+ *
+ * then
+ *
+ * ```
+ * Header = map[string][]string{
+ * "Accept-Encoding": {"gzip, deflate"},
+ * "Accept-Language": {"en-us"},
+ * "Foo": {"Bar", "two"},
+ * }
+ * ```
+ *
+ * For incoming requests, the Host header is promoted to the
+ * Request.Host field and removed from the Header map.
+ *
+ * HTTP defines that header names are case-insensitive. The
+ * request parser implements this by using CanonicalHeaderKey,
+ * making the first character and any characters following a
+ * hyphen uppercase and the rest lowercase.
+ *
+ * For client requests, certain headers such as Content-Length
+ * and Connection are automatically written when needed and
+ * values in Header may be ignored. See the documentation
+ * for the Request.Write method.
*/
- verifyNotBefore(cmp: number, req: boolean): boolean
- }
- interface MapClaims {
+ header: Header
/**
- * VerifyIssuer compares the iss claim against cmp.
- * If required is false, this method will return true if the value matches or is unset
+ * Body is the request's body.
+ *
+ * For client requests, a nil body means the request has no
+ * body, such as a GET request. The HTTP Client's Transport
+ * is responsible for calling the Close method.
+ *
+ * For server requests, the Request Body is always non-nil
+ * but will return EOF immediately when no body is present.
+ * The Server will close the request body. The ServeHTTP
+ * Handler does not need to.
+ *
+ * Body must allow Read to be called concurrently with Close.
+ * In particular, calling Close should unblock a Read waiting
+ * for input.
*/
- verifyIssuer(cmp: string, req: boolean): boolean
- }
- interface MapClaims {
+ body: io.ReadCloser
/**
- * Valid validates time based claims "exp, iat, nbf".
- * There is no accounting for clock skew.
- * As well, if any of the above claims are not in the token, it will still
- * be considered a valid claim.
+ * GetBody defines an optional func to return a new copy of
+ * Body. It is used for client requests when a redirect requires
+ * reading the body more than once. Use of GetBody still
+ * requires setting Body.
+ *
+ * For server requests, it is unused.
*/
- valid(): void
- }
-}
-
-/**
- * Package types implements some commonly used db serializable types
- * like datetime, json, etc.
- */
-namespace types {
- /**
- * JsonArray defines a slice that is safe for json and db read/write.
- */
- interface JsonArray extends Array{}
- interface JsonArray {
+ getBody: () => io.ReadCloser
/**
- * MarshalJSON implements the [json.Marshaler] interface.
+ * ContentLength records the length of the associated content.
+ * The value -1 indicates that the length is unknown.
+ * Values >= 0 indicate that the given number of bytes may
+ * be read from Body.
+ *
+ * For client requests, a value of 0 with a non-nil Body is
+ * also treated as unknown.
*/
- marshalJSON(): string|Array
- }
- interface JsonArray {
+ contentLength: number
/**
- * Value implements the [driver.Valuer] interface.
+ * TransferEncoding lists the transfer encodings from outermost to
+ * innermost. An empty list denotes the "identity" encoding.
+ * TransferEncoding can usually be ignored; chunked encoding is
+ * automatically added and removed as necessary when sending and
+ * receiving requests.
*/
- value(): any
- }
- interface JsonArray {
+ transferEncoding: Array
/**
- * Scan implements [sql.Scanner] interface to scan the provided value
- * into the current JsonArray[T] instance.
+ * Close indicates whether to close the connection after
+ * replying to this request (for servers) or after sending this
+ * request and reading its response (for clients).
+ *
+ * For server requests, the HTTP server handles this automatically
+ * and this field is not needed by Handlers.
+ *
+ * For client requests, setting this field prevents re-use of
+ * TCP connections between requests to the same hosts, as if
+ * Transport.DisableKeepAlives were set.
*/
- scan(value: any): void
- }
- /**
- * JsonMap defines a map that is safe for json and db read/write.
- */
- interface JsonMap extends _TygojaDict{}
- interface JsonMap {
+ close: boolean
/**
- * MarshalJSON implements the [json.Marshaler] interface.
+ * For server requests, Host specifies the host on which the
+ * URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this
+ * is either the value of the "Host" header or the host name
+ * given in the URL itself. For HTTP/2, it is the value of the
+ * ":authority" pseudo-header field.
+ * It may be of the form "host:port". For international domain
+ * names, Host may be in Punycode or Unicode form. Use
+ * golang.org/x/net/idna to convert it to either format if
+ * needed.
+ * To prevent DNS rebinding attacks, server Handlers should
+ * validate that the Host header has a value for which the
+ * Handler considers itself authoritative. The included
+ * ServeMux supports patterns registered to particular host
+ * names and thus protects its registered Handlers.
+ *
+ * For client requests, Host optionally overrides the Host
+ * header to send. If empty, the Request.Write method uses
+ * the value of URL.Host. Host may contain an international
+ * domain name.
*/
- marshalJSON(): string|Array
- }
- interface JsonMap {
+ host: string
/**
- * Get retrieves a single value from the current JsonMap.
- *
- * This helper was added primarily to assist the goja integration since custom map types
- * don't have direct access to the map keys (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods).
+ * Form contains the parsed form data, including both the URL
+ * field's query parameters and the PATCH, POST, or PUT form data.
+ * This field is only available after ParseForm is called.
+ * The HTTP client ignores Form and uses Body instead.
*/
- get(key: string): any
- }
- interface JsonMap {
+ form: url.Values
/**
- * Set sets a single value in the current JsonMap.
+ * PostForm contains the parsed form data from PATCH, POST
+ * or PUT body parameters.
*
- * This helper was added primarily to assist the goja integration since custom map types
- * don't have direct access to the map keys (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods).
+ * This field is only available after ParseForm is called.
+ * The HTTP client ignores PostForm and uses Body instead.
*/
- set(key: string, value: any): void
- }
- interface JsonMap {
+ postForm: url.Values
/**
- * Value implements the [driver.Valuer] interface.
+ * MultipartForm is the parsed multipart form, including file uploads.
+ * This field is only available after ParseMultipartForm is called.
+ * The HTTP client ignores MultipartForm and uses Body instead.
*/
- value(): any
- }
- interface JsonMap {
+ multipartForm?: multipart.Form
/**
- * Scan implements [sql.Scanner] interface to scan the provided value
- * into the current `JsonMap` instance.
+ * Trailer specifies additional headers that are sent after the request
+ * body.
+ *
+ * For server requests, the Trailer map initially contains only the
+ * trailer keys, with nil values. (The client declares which trailers it
+ * will later send.) While the handler is reading from Body, it must
+ * not reference Trailer. After reading from Body returns EOF, Trailer
+ * can be read again and will contain non-nil values, if they were sent
+ * by the client.
+ *
+ * For client requests, Trailer must be initialized to a map containing
+ * the trailer keys to later send. The values may be nil or their final
+ * values. The ContentLength must be 0 or -1, to send a chunked request.
+ * After the HTTP request is sent the map values can be updated while
+ * the request body is read. Once the body returns EOF, the caller must
+ * not mutate Trailer.
+ *
+ * Few HTTP clients, servers, or proxies support HTTP trailers.
*/
- scan(value: any): void
- }
-}
-
-namespace auth {
- /**
- * AuthUser defines a standardized oauth2 user data structure.
- */
- interface AuthUser {
- id: string
- name: string
- username: string
- email: string
- avatarUrl: string
- accessToken: string
- refreshToken: string
- expiry: types.DateTime
- rawUser: _TygojaDict
- }
- /**
- * Provider defines a common interface for an OAuth2 client.
- */
- interface Provider {
- [key:string]: any;
+ trailer: Header
/**
- * Scopes returns the context associated with the provider (if any).
+ * RemoteAddr allows HTTP servers and other software to record
+ * the network address that sent the request, usually for
+ * logging. This field is not filled in by ReadRequest and
+ * has no defined format. The HTTP server in this package
+ * sets RemoteAddr to an "IP:port" address before invoking a
+ * handler.
+ * This field is ignored by the HTTP client.
*/
- context(): context.Context
+ remoteAddr: string
/**
- * SetContext assigns the specified context to the current provider.
+ * RequestURI is the unmodified request-target of the
+ * Request-Line (RFC 7230, Section 3.1.1) as sent by the client
+ * to a server. Usually the URL field should be used instead.
+ * It is an error to set this field in an HTTP client request.
*/
- setContext(ctx: context.Context): void
+ requestURI: string
/**
- * PKCE indicates whether the provider can use the PKCE flow.
+ * TLS allows HTTP servers and other software to record
+ * information about the TLS connection on which the request
+ * was received. This field is not filled in by ReadRequest.
+ * The HTTP server in this package sets the field for
+ * TLS-enabled connections before invoking a handler;
+ * otherwise it leaves the field nil.
+ * This field is ignored by the HTTP client.
*/
- pkce(): boolean
+ tls?: any
/**
- * SetPKCE toggles the state whether the provider can use the PKCE flow or not.
+ * Cancel is an optional channel whose closure indicates that the client
+ * request should be regarded as canceled. Not all implementations of
+ * RoundTripper may support Cancel.
+ *
+ * For server requests, this field is not applicable.
+ *
+ * Deprecated: Set the Request's context with NewRequestWithContext
+ * instead. If a Request's Cancel field and context are both
+ * set, it is undefined whether Cancel is respected.
*/
- setPKCE(enable: boolean): void
+ cancel: undefined
/**
- * DisplayName usually returns provider name as it is officially written
- * and it could be used directly in the UI.
+ * Response is the redirect response which caused this request
+ * to be created. This field is only populated during client
+ * redirects.
*/
- displayName(): string
+ response?: Response
+ }
+ interface Request {
/**
- * SetDisplayName sets the provider's display name.
+ * Context returns the request's context. To change the context, use
+ * [Request.Clone] or [Request.WithContext].
+ *
+ * The returned context is always non-nil; it defaults to the
+ * background context.
+ *
+ * For outgoing client requests, the context controls cancellation.
+ *
+ * For incoming server requests, the context is canceled when the
+ * client's connection closes, the request is canceled (with HTTP/2),
+ * or when the ServeHTTP method returns.
*/
- setDisplayName(displayName: string): void
+ context(): context.Context
+ }
+ interface Request {
/**
- * Scopes returns the provider access permissions that will be requested.
+ * WithContext returns a shallow copy of r with its context changed
+ * to ctx. The provided ctx must be non-nil.
+ *
+ * For outgoing client request, the context controls the entire
+ * lifetime of a request and its response: obtaining a connection,
+ * sending the request, and reading the response headers and body.
+ *
+ * To create a new request with a context, use [NewRequestWithContext].
+ * To make a deep copy of a request with a new context, use [Request.Clone].
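+ *
+ * As a rough illustrative sketch (the URL and timeout below are placeholders),
+ * an outgoing request bound to a context could be built with [NewRequestWithContext]:
+ *
+ * ```
+ * ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ * defer cancel()
+ *
+ * req, err := http.NewRequestWithContext(ctx, "GET", "https://example.com", nil)
+ * if err != nil {
+ *     // handle error
+ * }
+ * resp, err := http.DefaultClient.Do(req) // returns a context error once ctx expires
+ * ```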
*/
- scopes(): Array
+ withContext(ctx: context.Context): (Request)
+ }
+ interface Request {
/**
- * SetScopes sets the provider access permissions that will be requested later.
+ * Clone returns a deep copy of r with its context changed to ctx.
+ * The provided ctx must be non-nil.
+ *
+ * For an outgoing client request, the context controls the entire
+ * lifetime of a request and its response: obtaining a connection,
+ * sending the request, and reading the response headers and body.
*/
- setScopes(scopes: Array): void
+ clone(ctx: context.Context): (Request)
+ }
+ interface Request {
/**
- * ClientId returns the provider client's app ID.
+ * ProtoAtLeast reports whether the HTTP protocol used
+ * in the request is at least major.minor.
*/
- clientId(): string
+ protoAtLeast(major: number, minor: number): boolean
+ }
+ interface Request {
/**
- * SetClientId sets the provider client's ID.
+ * UserAgent returns the client's User-Agent, if sent in the request.
*/
- setClientId(clientId: string): void
+ userAgent(): string
+ }
+ interface Request {
/**
- * ClientSecret returns the provider client's app secret.
+ * Cookies parses and returns the HTTP cookies sent with the request.
*/
- clientSecret(): string
+ cookies(): Array<(Cookie | undefined)>
+ }
+ interface Request {
/**
- * SetClientSecret sets the provider client's app secret.
+ * Cookie returns the named cookie provided in the request or
+ * [ErrNoCookie] if not found.
+ * If multiple cookies match the given name, only one cookie will
+ * be returned.
*/
- setClientSecret(secret: string): void
+ cookie(name: string): (Cookie)
+ }
+ interface Request {
/**
- * RedirectUrl returns the end address to redirect the user
- * going through the OAuth flow.
+ * AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
+ * AddCookie does not attach more than one [Cookie] header field. That
+ * means all cookies, if any, are written into the same line,
+ * separated by semicolon.
+ * AddCookie only sanitizes c's name and value, and does not sanitize
+ * a Cookie header already present in the request.
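+ *
+ * A minimal sketch (the cookie name and value are placeholders):
+ *
+ * ```
+ * req, _ := http.NewRequest("GET", "https://example.com", nil)
+ * req.AddCookie(&http.Cookie{Name: "session", Value: "abc123"})
+ *
+ * // and on the server side, inside a handler:
+ * c, err := r.Cookie("session")
+ * if err != nil {
+ *     // http.ErrNoCookie if the cookie was not sent
+ * }
+ * _ = c
+ * ```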
*/
- redirectUrl(): string
+ addCookie(c: Cookie): void
+ }
+ interface Request {
/**
- * SetRedirectUrl sets the provider's RedirectUrl.
+ * Referer returns the referring URL, if sent in the request.
+ *
+ * Referer is misspelled as in the request itself, a mistake from the
+ * earliest days of HTTP. This value can also be fetched from the
+ * [Header] map as Header["Referer"]; the benefit of making it available
+ * as a method is that the compiler can diagnose programs that use the
+ * alternate (correct English) spelling req.Referrer() but cannot
+ * diagnose programs that use Header["Referrer"].
*/
- setRedirectUrl(url: string): void
+ referer(): string
+ }
+ interface Request {
/**
- * AuthUrl returns the provider's authorization service url.
+ * MultipartReader returns a MIME multipart reader if this is a
+ * multipart/form-data or a multipart/mixed POST request, else returns nil and an error.
+ * Use this function instead of [Request.ParseMultipartForm] to
+ * process the request body as a stream.
*/
- authUrl(): string
+ multipartReader(): (multipart.Reader)
+ }
+ interface Request {
/**
- * SetAuthUrl sets the provider's AuthUrl.
+ * Write writes an HTTP/1.1 request, which is the header and body, in wire format.
+ * This method consults the following fields of the request:
+ *
+ * ```
+ * Host
+ * URL
+ * Method (defaults to "GET")
+ * Header
+ * ContentLength
+ * TransferEncoding
+ * Body
+ * ```
+ *
+ * If Body is present, Content-Length is <= 0 and [Request.TransferEncoding]
+ * hasn't been set to "identity", Write adds "Transfer-Encoding:
+ * chunked" to the header. Body is closed after it is sent.
*/
- setAuthUrl(url: string): void
+ write(w: io.Writer): void
+ }
+ interface Request {
/**
- * TokenUrl returns the provider's token exchange service url.
+ * WriteProxy is like [Request.Write] but writes the request in the form
+ * expected by an HTTP proxy. In particular, [Request.WriteProxy] writes the
+ * initial Request-URI line of the request with an absolute URI, per
+ * section 5.3 of RFC 7230, including the scheme and host.
+ * In either case, WriteProxy also writes a Host header, using
+ * either r.Host or r.URL.Host.
*/
- tokenUrl(): string
+ writeProxy(w: io.Writer): void
+ }
+ interface Request {
/**
- * SetTokenUrl sets the provider's TokenUrl.
+ * BasicAuth returns the username and password provided in the request's
+ * Authorization header, if the request uses HTTP Basic Authentication.
+ * See RFC 2617, Section 2.
*/
- setTokenUrl(url: string): void
+ basicAuth(): [string, boolean]
+ }
+ interface Request {
/**
- * UserApiUrl returns the provider's user info api url.
+ * SetBasicAuth sets the request's Authorization header to use HTTP
+ * Basic Authentication with the provided username and password.
+ *
+ * With HTTP Basic Authentication the provided username and password
+ * are not encrypted. It should generally only be used in an HTTPS
+ * request.
+ *
+ * The username may not contain a colon. Some protocols may impose
+ * additional requirements on pre-escaping the username and
+ * password. For instance, when used with OAuth2, both arguments must
+ * be URL encoded first with [url.QueryEscape].
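+ *
+ * For illustration, with placeholder credentials:
+ *
+ * ```
+ * // client side
+ * req, _ := http.NewRequest("GET", "https://example.com/admin", nil)
+ * req.SetBasicAuth("user", "secret")
+ *
+ * // server side, inside a handler
+ * user, pass, ok := r.BasicAuth()
+ * if !ok {
+ *     w.WriteHeader(http.StatusUnauthorized)
+ *     return
+ * }
+ * _, _ = user, pass
+ * ```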
*/
- userApiUrl(): string
+ setBasicAuth(username: string, password: string): void
+ }
+ interface Request {
/**
- * SetUserApiUrl sets the provider's UserApiUrl.
+ * ParseForm populates r.Form and r.PostForm.
+ *
+ * For all requests, ParseForm parses the raw query from the URL and updates
+ * r.Form.
+ *
+ * For POST, PUT, and PATCH requests, it also reads the request body, parses it
+ * as a form and puts the results into both r.PostForm and r.Form. Request body
+ * parameters take precedence over URL query string values in r.Form.
+ *
+ * If the request Body's size has not already been limited by [MaxBytesReader],
+ * the size is capped at 10MB.
+ *
+ * For other HTTP methods, or when the Content-Type is not
+ * application/x-www-form-urlencoded, the request Body is not read, and
+ * r.PostForm is initialized to a non-nil, empty value.
+ *
+ * [Request.ParseMultipartForm] calls ParseForm automatically.
+ * ParseForm is idempotent.
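+ *
+ * A short sketch inside a handler (the field names are placeholders):
+ *
+ * ```
+ * if err := r.ParseForm(); err != nil {
+ *     // handle error
+ * }
+ * name := r.Form.Get("name")        // query string or body value
+ * addr := r.PostForm.Get("address") // body value only
+ * ```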
*/
- setUserApiUrl(url: string): void
+ parseForm(): void
+ }
+ interface Request {
/**
- * Client returns an http client using the provided token.
+ * ParseMultipartForm parses a request body as multipart/form-data.
+ * The whole request body is parsed and up to a total of maxMemory bytes of
+ * its file parts are stored in memory, with the remainder stored on
+ * disk in temporary files.
+ * ParseMultipartForm calls [Request.ParseForm] if necessary.
+ * If ParseForm returns an error, ParseMultipartForm returns it but also
+ * continues parsing the request body.
+ * After one call to ParseMultipartForm, subsequent calls have no effect.
*/
- client(token: oauth2.Token): (any)
+ parseMultipartForm(maxMemory: number): void
+ }
+ interface Request {
/**
- * BuildAuthUrl returns a URL to the provider's consent page
- * that asks for permissions for the required scopes explicitly.
+ * FormValue returns the first value for the named component of the query.
+ * The precedence order:
+ * 1. application/x-www-form-urlencoded form body (POST, PUT, PATCH only)
+ * 2. query parameters (always)
+ * 3. multipart/form-data form body (always)
+ *
+ * FormValue calls [Request.ParseMultipartForm] and [Request.ParseForm]
+ * if necessary and ignores any errors returned by these functions.
+ * If key is not present, FormValue returns the empty string.
+ * To access multiple values of the same key, call ParseForm and
+ * then inspect [Request.Form] directly.
*/
- buildAuthUrl(state: string, ...opts: oauth2.AuthCodeOption[]): string
+ formValue(key: string): string
+ }
+ interface Request {
/**
- * FetchToken converts an authorization code to token.
+ * PostFormValue returns the first value for the named component of the POST,
+ * PUT, or PATCH request body. URL query parameters are ignored.
+ * PostFormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary and ignores
+ * any errors returned by these functions.
+ * If key is not present, PostFormValue returns the empty string.
*/
- fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token)
+ postFormValue(key: string): string
+ }
+ interface Request {
/**
- * FetchRawUserData requests and marshalizes into `result` the
- * the OAuth user api response.
+ * FormFile returns the first file for the provided form key.
+ * FormFile calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary.
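+ *
+ * A minimal upload-handling sketch ("upload" is a placeholder field name):
+ *
+ * ```
+ * if err := r.ParseMultipartForm(32 << 20); err != nil { // keep up to ~32 MB in memory
+ *     // handle error
+ * }
+ * f, fh, err := r.FormFile("upload")
+ * if err != nil {
+ *     // handle error
+ * }
+ * defer f.Close()
+ * log.Println(fh.Filename, fh.Size)
+ * ```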
*/
- fetchRawUserData(token: oauth2.Token): string|Array
+ formFile(key: string): [multipart.File, (multipart.FileHeader)]
+ }
+ interface Request {
/**
- * FetchAuthUser is similar to FetchRawUserData, but normalizes and
- * marshalizes the user api response into a standardized AuthUser struct.
+ * PathValue returns the value for the named path wildcard in the [ServeMux] pattern
+ * that matched the request.
+ * It returns the empty string if the request was not matched against a pattern
+ * or there is no such wildcard in the pattern.
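+ *
+ * For illustration, assuming a Go 1.22+ style [ServeMux] pattern (the route below is a placeholder):
+ *
+ * ```
+ * mux := http.NewServeMux()
+ * mux.HandleFunc("GET /items/{id}", func(w http.ResponseWriter, r *http.Request) {
+ *     fmt.Fprintf(w, "item %s", r.PathValue("id"))
+ * })
+ * ```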
*/
- fetchAuthUser(token: oauth2.Token): (AuthUser)
+ pathValue(name: string): string
+ }
+ interface Request {
+ /**
+ * SetPathValue sets name to value, so that subsequent calls to r.PathValue(name)
+ * return value.
+ */
+ setPathValue(name: string, value: string): void
}
-}
-
-/**
- * Package echo implements high performance, minimalist Go web framework.
- *
- * Example:
- *
- * ```
- * package main
- *
- * import (
- * "github.com/labstack/echo/v5"
- * "github.com/labstack/echo/v5/middleware"
- * "log"
- * "net/http"
- * )
- *
- * // Handler
- * func hello(c echo.Context) error {
- * return c.String(http.StatusOK, "Hello, World!")
- * }
- *
- * func main() {
- * // Echo instance
- * e := echo.New()
- *
- * // Middleware
- * e.Use(middleware.Logger())
- * e.Use(middleware.Recover())
- *
- * // Routes
- * e.GET("/", hello)
- *
- * // Start server
- * if err := e.Start(":8080"); err != http.ErrServerClosed {
- * log.Fatal(err)
- * }
- * }
- * ```
- *
- * Learn more at https://echo.labstack.com
- */
-namespace echo {
/**
- * Context represents the context of the current HTTP request. It holds request and
- * response objects, path, path parameters, data and registered handler.
+ * A ResponseWriter interface is used by an HTTP handler to
+ * construct an HTTP response.
+ *
+ * A ResponseWriter may not be used after [Handler.ServeHTTP] has returned.
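+ *
+ * A minimal handler sketch using Header, WriteHeader and Write:
+ *
+ * ```
+ * func handler(w http.ResponseWriter, r *http.Request) {
+ *     w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ *     w.WriteHeader(http.StatusCreated)
+ *     w.Write([]byte("created"))
+ * }
+ * ```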
*/
- interface Context {
+ interface ResponseWriter {
[key:string]: any;
/**
- * Request returns `*http.Request`.
- */
- request(): (http.Request)
- /**
- * SetRequest sets `*http.Request`.
- */
- setRequest(r: http.Request): void
- /**
- * SetResponse sets `*Response`.
- */
- setResponse(r: Response): void
- /**
- * Response returns `*Response`.
- */
- response(): (Response)
- /**
- * IsTLS returns true if HTTP connection is TLS otherwise false.
+ * Header returns the header map that will be sent by
+ * [ResponseWriter.WriteHeader]. The [Header] map also is the mechanism with which
+ * [Handler] implementations can set HTTP trailers.
+ *
+ * Changing the header map after a call to [ResponseWriter.WriteHeader] (or
+ * [ResponseWriter.Write]) has no effect unless the HTTP status code was of the
+ * 1xx class or the modified headers are trailers.
+ *
+ * There are two ways to set Trailers. The preferred way is to
+ * predeclare in the headers which trailers you will later
+ * send by setting the "Trailer" header to the names of the
+ * trailer keys which will come later. In this case, those
+ * keys of the Header map are treated as if they were
+ * trailers. See the example. The second way, for trailer
+ * keys not known to the [Handler] until after the first [ResponseWriter.Write],
+ * is to prefix the [Header] map keys with the [TrailerPrefix]
+ * constant value.
+ *
+ * To suppress automatic response headers (such as "Date"), set
+ * their value to nil.
*/
- isTLS(): boolean
+ header(): Header
/**
- * IsWebSocket returns true if HTTP connection is WebSocket otherwise false.
- */
- isWebSocket(): boolean
- /**
- * Scheme returns the HTTP protocol scheme, `http` or `https`.
+ * Write writes the data to the connection as part of an HTTP reply.
+ *
+ * If [ResponseWriter.WriteHeader] has not yet been called, Write calls
+ * WriteHeader(http.StatusOK) before writing the data. If the Header
+ * does not contain a Content-Type line, Write adds a Content-Type set
+ * to the result of passing the initial 512 bytes of written data to
+ * [DetectContentType]. Additionally, if the total size of all written
+ * data is under a few KB and there are no Flush calls, the
+ * Content-Length header is added automatically.
+ *
+ * Depending on the HTTP protocol version and the client, calling
+ * Write or WriteHeader may prevent future reads on the
+ * Request.Body. For HTTP/1.x requests, handlers should read any
+ * needed request body data before writing the response. Once the
+ * headers have been flushed (due to either an explicit Flusher.Flush
+ * call or writing enough data to trigger a flush), the request body
+ * may be unavailable. For HTTP/2 requests, the Go HTTP server permits
+ * handlers to continue to read the request body while concurrently
+ * writing the response. However, such behavior may not be supported
+ * by all HTTP/2 clients. Handlers should read before writing if
+ * possible to maximize compatibility.
*/
- scheme(): string
+ write(_arg0: string|Array): number
/**
- * RealIP returns the client's network address based on `X-Forwarded-For`
- * or `X-Real-IP` request header.
- * The behavior can be configured using `Echo#IPExtractor`.
+ * WriteHeader sends an HTTP response header with the provided
+ * status code.
+ *
+ * If WriteHeader is not called explicitly, the first call to Write
+ * will trigger an implicit WriteHeader(http.StatusOK).
+ * Thus explicit calls to WriteHeader are mainly used to
+ * send error codes or 1xx informational responses.
+ *
+ * The provided code must be a valid HTTP 1xx-5xx status code.
+ * Any number of 1xx headers may be written, followed by at most
+ * one 2xx-5xx header. 1xx headers are sent immediately, but 2xx-5xx
+ * headers may be buffered. Use the Flusher interface to send
+ * buffered data. The header map is cleared when 2xx-5xx headers are
+ * sent, but not with 1xx headers.
+ *
+ * The server will automatically send a 100 (Continue) header
+ * on the first read from the request body if the request has
+ * an "Expect: 100-continue" header.
*/
- realIP(): string
+ writeHeader(statusCode: number): void
+ }
+ /**
+ * A Server defines parameters for running an HTTP server.
+ * The zero value for Server is a valid configuration.
+ */
+ interface Server {
/**
- * RouteInfo returns current request route information. Method, Path, Name and params if they exist for matched route.
- * In case of 404 (route not found) and 405 (method not allowed) RouteInfo returns generic struct for these cases.
+ * Addr optionally specifies the TCP address for the server to listen on,
+ * in the form "host:port". If empty, ":http" (port 80) is used.
+ * The service names are defined in RFC 6335 and assigned by IANA.
+ * See net.Dial for details of the address format.
*/
- routeInfo(): RouteInfo
+ addr: string
+ handler: Handler // handler to invoke, http.DefaultServeMux if nil
/**
- * Path returns the registered path for the handler.
+ * DisableGeneralOptionsHandler, if true, passes "OPTIONS *" requests to the Handler,
+ * otherwise responds with 200 OK and Content-Length: 0.
*/
- path(): string
+ disableGeneralOptionsHandler: boolean
/**
- * PathParam returns path parameter by name.
+ * TLSConfig optionally provides a TLS configuration for use
+ * by ServeTLS and ListenAndServeTLS. Note that this value is
+ * cloned by ServeTLS and ListenAndServeTLS, so it's not
+ * possible to modify the configuration with methods like
+ * tls.Config.SetSessionTicketKeys. To use
+ * SetSessionTicketKeys, use Server.Serve with a TLS Listener
+ * instead.
*/
- pathParam(name: string): string
+ tlsConfig?: any
/**
- * PathParamDefault returns the path parameter or default value for the provided name.
+ * ReadTimeout is the maximum duration for reading the entire
+ * request, including the body. A zero or negative value means
+ * there will be no timeout.
*
- * Notes for DefaultRouter implementation:
- * Path parameter could be empty for cases like that:
- * * route `/release-:version/bin` and request URL is `/release-/bin`
- * * route `/api/:version/image.jpg` and request URL is `/api//image.jpg`
- * but not when path parameter is last part of route path
- * * route `/download/file.:ext` will not match request `/download/file.`
+ * Because ReadTimeout does not let Handlers make per-request
+ * decisions on each request body's acceptable deadline or
+ * upload rate, most users will prefer to use
+ * ReadHeaderTimeout. It is valid to use them both.
*/
- pathParamDefault(name: string, defaultValue: string): string
+ readTimeout: time.Duration
/**
- * PathParams returns path parameter values.
+ * ReadHeaderTimeout is the amount of time allowed to read
+ * request headers. The connection's read deadline is reset
+ * after reading the headers and the Handler can decide what
+ * is considered too slow for the body. If ReadHeaderTimeout
+ * is zero, the value of ReadTimeout is used. If both are
+ * zero, there is no timeout.
*/
- pathParams(): PathParams
+ readHeaderTimeout: time.Duration
/**
- * SetPathParams sets path parameters for current request.
+ * WriteTimeout is the maximum duration before timing out
+ * writes of the response. It is reset whenever a new
+ * request's header is read. Like ReadTimeout, it does not
+ * let Handlers make decisions on a per-request basis.
+ * A zero or negative value means there will be no timeout.
*/
- setPathParams(params: PathParams): void
+ writeTimeout: time.Duration
/**
- * QueryParam returns the query param for the provided name.
+ * IdleTimeout is the maximum amount of time to wait for the
+ * next request when keep-alives are enabled. If IdleTimeout
+ * is zero, the value of ReadTimeout is used. If both are
+ * zero, there is no timeout.
*/
- queryParam(name: string): string
+ idleTimeout: time.Duration
/**
- * QueryParamDefault returns the query param or default value for the provided name.
+ * MaxHeaderBytes controls the maximum number of bytes the
+ * server will read parsing the request header's keys and
+ * values, including the request line. It does not limit the
+ * size of the request body.
+ * If zero, DefaultMaxHeaderBytes is used.
*/
- queryParamDefault(name: string): string
+ maxHeaderBytes: number
/**
- * QueryParams returns the query parameters as `url.Values`.
+ * TLSNextProto optionally specifies a function to take over
+ * ownership of the provided TLS connection when an ALPN
+ * protocol upgrade has occurred. The map key is the protocol
+ * name negotiated. The Handler argument should be used to
+ * handle HTTP requests and will initialize the Request's TLS
+ * and RemoteAddr if not already set. The connection is
+ * automatically closed when the function returns.
+ * If TLSNextProto is not nil, HTTP/2 support is not enabled
+ * automatically.
*/
- queryParams(): url.Values
+ tlsNextProto: _TygojaDict
/**
- * QueryString returns the URL query string.
+ * ConnState specifies an optional callback function that is
+ * called when a client connection changes state. See the
+ * ConnState type and associated constants for details.
*/
- queryString(): string
+ connState: (_arg0: net.Conn, _arg1: ConnState) => void
/**
- * FormValue returns the form field value for the provided name.
+ * ErrorLog specifies an optional logger for errors accepting
+ * connections, unexpected behavior from handlers, and
+ * underlying FileSystem errors.
+ * If nil, logging is done via the log package's standard logger.
*/
- formValue(name: string): string
+ errorLog?: any
/**
- * FormValueDefault returns the form field value or default value for the provided name.
+ * BaseContext optionally specifies a function that returns
+ * the base context for incoming requests on this server.
+ * The provided Listener is the specific Listener that's
+ * about to start accepting requests.
+ * If BaseContext is nil, the default is context.Background().
+ * If non-nil, it must return a non-nil context.
*/
- formValueDefault(name: string): string
+ baseContext: (_arg0: net.Listener) => context.Context
/**
- * FormValues returns the form field values as `url.Values`.
+ * ConnContext optionally specifies a function that modifies
+ * the context used for a new connection c. The provided ctx
+ * is derived from the base context and has a ServerContextKey
+ * value.
*/
- formValues(): url.Values
+ connContext: (ctx: context.Context, c: net.Conn) => context.Context
+ }
+ interface Server {
/**
- * FormFile returns the multipart form file for the provided name.
+ * Close immediately closes all active net.Listeners and any
+ * connections in state [StateNew], [StateActive], or [StateIdle]. For a
+ * graceful shutdown, use [Server.Shutdown].
+ *
+ * Close does not attempt to close (and does not even know about)
+ * any hijacked connections, such as WebSockets.
+ *
+ * Close returns any error returned from closing the [Server]'s
+ * underlying Listener(s).
*/
- formFile(name: string): (multipart.FileHeader)
+ close(): void
+ }
+ interface Server {
/**
- * MultipartForm returns the multipart form.
+ * Shutdown gracefully shuts down the server without interrupting any
+ * active connections. Shutdown works by first closing all open
+ * listeners, then closing all idle connections, and then waiting
+ * indefinitely for connections to return to idle and then shut down.
+ * If the provided context expires before the shutdown is complete,
+ * Shutdown returns the context's error, otherwise it returns any
+ * error returned from closing the [Server]'s underlying Listener(s).
+ *
+ * When Shutdown is called, [Serve], [ListenAndServe], and
+ * [ListenAndServeTLS] immediately return [ErrServerClosed]. Make sure the
+ * program doesn't exit and waits instead for Shutdown to return.
+ *
+ * Shutdown does not attempt to close nor wait for hijacked
+ * connections such as WebSockets. The caller of Shutdown should
+ * separately notify such long-lived connections of shutdown and wait
+ * for them to close, if desired. See [Server.RegisterOnShutdown] for a way to
+ * register shutdown notification functions.
+ *
+ * Once Shutdown has been called on a server, it may not be reused;
+ * future calls to methods such as Serve will return ErrServerClosed.
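+ *
+ * A graceful shutdown sketch (mux, the address and the timeout are placeholders):
+ *
+ * ```
+ * srv := &http.Server{Addr: ":8080", Handler: mux}
+ *
+ * go func() {
+ *     if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+ *         log.Fatal(err)
+ *     }
+ * }()
+ *
+ * // later, e.g. after receiving an interrupt signal:
+ * ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ * defer cancel()
+ * if err := srv.Shutdown(ctx); err != nil {
+ *     log.Fatal(err)
+ * }
+ * ```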
*/
- multipartForm(): (multipart.Form)
+ shutdown(ctx: context.Context): void
+ }
+ interface Server {
/**
- * Cookie returns the named cookie provided in the request.
+ * RegisterOnShutdown registers a function to call on [Server.Shutdown].
+ * This can be used to gracefully shutdown connections that have
+ * undergone ALPN protocol upgrade or that have been hijacked.
+ * This function should start protocol-specific graceful shutdown,
+ * but should not wait for shutdown to complete.
*/
- cookie(name: string): (http.Cookie)
+ registerOnShutdown(f: () => void): void
+ }
+ interface Server {
/**
- * SetCookie adds a `Set-Cookie` header in HTTP response.
+ * ListenAndServe listens on the TCP network address srv.Addr and then
+ * calls [Serve] to handle requests on incoming connections.
+ * Accepted connections are configured to enable TCP keep-alives.
+ *
+ * If srv.Addr is blank, ":http" is used.
+ *
+ * ListenAndServe always returns a non-nil error. After [Server.Shutdown] or [Server.Close],
+ * the returned error is [ErrServerClosed].
*/
- setCookie(cookie: http.Cookie): void
+ listenAndServe(): void
+ }
+ interface Server {
/**
- * Cookies returns the HTTP cookies sent with the request.
+ * Serve accepts incoming connections on the Listener l, creating a
+ * new service goroutine for each. The service goroutines read requests and
+ * then call srv.Handler to reply to them.
+ *
+ * HTTP/2 support is only enabled if the Listener returns [*tls.Conn]
+ * connections and they were configured with "h2" in the TLS
+ * Config.NextProtos.
+ *
+ * Serve always returns a non-nil error and closes l.
+ * After [Server.Shutdown] or [Server.Close], the returned error is [ErrServerClosed].
*/
- cookies(): Array<(http.Cookie | undefined)>
+ serve(l: net.Listener): void
+ }
+ interface Server {
/**
- * Get retrieves data from the context.
+ * ServeTLS accepts incoming connections on the Listener l, creating a
+ * new service goroutine for each. The service goroutines perform TLS
+ * setup and then read requests, calling srv.Handler to reply to them.
+ *
+ * Files containing a certificate and matching private key for the
+ * server must be provided if neither the [Server]'s
+ * TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
+ * If the certificate is signed by a certificate authority, the
+ * certFile should be the concatenation of the server's certificate,
+ * any intermediates, and the CA's certificate.
+ *
+ * ServeTLS always returns a non-nil error. After [Server.Shutdown] or [Server.Close], the
+ * returned error is [ErrServerClosed].
*/
- get(key: string): {
+ serveTLS(l: net.Listener, certFile: string, keyFile: string): void
}
+ interface Server {
/**
- * Set saves data in the context.
+ * SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
+ * By default, keep-alives are always enabled. Only very
+ * resource-constrained environments or servers in the process of
+ * shutting down should disable them.
*/
- set(key: string, val: {
- }): void
+ setKeepAlivesEnabled(v: boolean): void
+ }
+ interface Server {
/**
- * Bind binds path params, query params and the request body into provided type `i`. The default binder
- * binds body based on Content-Type header.
- */
- bind(i: {
- }): void
- /**
- * Validate validates provided `i`. It is usually called after `Context#Bind()`.
- * Validator must be registered using `Echo#Validator`.
+ * ListenAndServeTLS listens on the TCP network address srv.Addr and
+ * then calls [ServeTLS] to handle requests on incoming TLS connections.
+ * Accepted connections are configured to enable TCP keep-alives.
+ *
+ * Filenames containing a certificate and matching private key for the
+ * server must be provided if neither the [Server]'s TLSConfig.Certificates
+ * nor TLSConfig.GetCertificate are populated. If the certificate is
+ * signed by a certificate authority, the certFile should be the
+ * concatenation of the server's certificate, any intermediates, and
+ * the CA's certificate.
+ *
+ * If srv.Addr is blank, ":https" is used.
+ *
+ * ListenAndServeTLS always returns a non-nil error. After [Server.Shutdown] or
+ * [Server.Close], the returned error is [ErrServerClosed].
*/
- validate(i: {
- }): void
+ listenAndServeTLS(certFile: string, keyFile: string): void
+ }
+}
+
+/**
+ * Package exec runs external commands. It wraps os.StartProcess to make it
+ * easier to remap stdin and stdout, connect I/O with pipes, and do other
+ * adjustments.
+ *
+ * Unlike the "system" library call from C and other languages, the
+ * os/exec package intentionally does not invoke the system shell and
+ * does not expand any glob patterns or handle other expansions,
+ * pipelines, or redirections typically done by shells. The package
+ * behaves more like C's "exec" family of functions. To expand glob
+ * patterns, either call the shell directly, taking care to escape any
+ * dangerous input, or use the path/filepath package's Glob function.
+ * To expand environment variables, use package os's ExpandEnv.
+ *
+ * Note that the examples in this package assume a Unix system.
+ * They may not run on Windows, and they do not run in the Go Playground
+ * used by golang.org and godoc.org.
+ *
+ * # Executables in the current directory
+ *
+ * The functions Command and LookPath look for a program
+ * in the directories listed in the current path, following the
+ * conventions of the host operating system.
+ * Operating systems have for decades included the current
+ * directory in this search, sometimes implicitly and sometimes
+ * configured explicitly that way by default.
+ * Modern practice is that including the current directory
+ * is usually unexpected and often leads to security problems.
+ *
+ * To avoid those security problems, as of Go 1.19, this package will not resolve a program
+ * using an implicit or explicit path entry relative to the current directory.
+ * That is, if you run exec.LookPath("go"), it will not successfully return
+ * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured.
+ * Instead, if the usual path algorithms would result in that answer,
+ * these functions return an error err satisfying errors.Is(err, ErrDot).
+ *
+ * For example, consider these two program snippets:
+ *
+ * ```
+ * path, err := exec.LookPath("prog")
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * use(path)
+ * ```
+ *
+ * and
+ *
+ * ```
+ * cmd := exec.Command("prog")
+ * if err := cmd.Run(); err != nil {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * These will not find and run ./prog or .\prog.exe,
+ * no matter how the current path is configured.
+ *
+ * Code that always wants to run a program from the current directory
+ * can be rewritten to say "./prog" instead of "prog".
+ *
+ * Code that insists on including results from relative path entries
+ * can instead override the error using an errors.Is check:
+ *
+ * ```
+ * path, err := exec.LookPath("prog")
+ * if errors.Is(err, exec.ErrDot) {
+ * err = nil
+ * }
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * use(path)
+ * ```
+ *
+ * and
+ *
+ * ```
+ * cmd := exec.Command("prog")
+ * if errors.Is(cmd.Err, exec.ErrDot) {
+ * cmd.Err = nil
+ * }
+ * if err := cmd.Run(); err != nil {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * Setting the environment variable GODEBUG=execerrdot=0
+ * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19
+ * behavior for programs that are unable to apply more targeted fixes.
+ * A future version of Go may remove support for this variable.
+ *
+ * Before adding such overrides, make sure you understand the
+ * security implications of doing so.
+ * See https://go.dev/blog/path-security for more information.
+ */
+namespace exec {
+ /**
+ * Cmd represents an external command being prepared or run.
+ *
+ * A Cmd cannot be reused after calling its Run, Output or CombinedOutput
+ * methods.
+ */
+ interface Cmd {
/**
- * Render renders a template with data and sends a text/html response with status
- * code. Renderer must be registered using `Echo.Renderer`.
+ * Path is the path of the command to run.
+ *
+ * This is the only field that must be set to a non-zero
+ * value. If Path is relative, it is evaluated relative
+ * to Dir.
*/
- render(code: number, name: string, data: {
- }): void
+ path: string
/**
- * HTML sends an HTTP response with status code.
+ * Args holds command line arguments, including the command as Args[0].
+ * If the Args field is empty or nil, Run uses {Path}.
+ *
+ * In typical use, both Path and Args are set by calling Command.
*/
- html(code: number, html: string): void
+ args: Array
/**
- * HTMLBlob sends an HTTP blob response with status code.
+ * Env specifies the environment of the process.
+ * Each entry is of the form "key=value".
+ * If Env is nil, the new process uses the current process's
+ * environment.
+ * If Env contains duplicate environment keys, only the last
+ * value in the slice for each duplicate key is used.
+ * As a special case on Windows, SYSTEMROOT is always added if
+ * missing and not explicitly set to the empty string.
*/
- htmlBlob(code: number, b: string|Array): void
+ env: Array
/**
- * String sends a string response with status code.
+ * Dir specifies the working directory of the command.
+ * If Dir is the empty string, Run runs the command in the
+ * calling process's current directory.
*/
- string(code: number, s: string): void
+ dir: string
/**
- * JSON sends a JSON response with status code.
+ * Stdin specifies the process's standard input.
+ *
+ * If Stdin is nil, the process reads from the null device (os.DevNull).
+ *
+ * If Stdin is an *os.File, the process's standard input is connected
+ * directly to that file.
+ *
+ * Otherwise, during the execution of the command a separate
+ * goroutine reads from Stdin and delivers that data to the command
+ * over a pipe. In this case, Wait does not complete until the goroutine
+ * stops copying, either because it has reached the end of Stdin
+ * (EOF or a read error), or because writing to the pipe returned an error,
+ * or because a nonzero WaitDelay was set and expired.
*/
- json(code: number, i: {
- }): void
+ stdin: io.Reader
/**
- * JSONPretty sends a pretty-print JSON with status code.
+ * Stdout and Stderr specify the process's standard output and error.
+ *
+ * If either is nil, Run connects the corresponding file descriptor
+ * to the null device (os.DevNull).
+ *
+ * If either is an *os.File, the corresponding output from the process
+ * is connected directly to that file.
+ *
+ * Otherwise, during the execution of the command a separate goroutine
+ * reads from the process over a pipe and delivers that data to the
+ * corresponding Writer. In this case, Wait does not complete until the
+ * goroutine reaches EOF or encounters an error or a nonzero WaitDelay
+ * expires.
+ *
+ * If Stdout and Stderr are the same writer, and have a type that can
+ * be compared with ==, at most one goroutine at a time will call Write.
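+ *
+ * A small sketch capturing output into a buffer (the command, directory and
+ * environment variable are placeholders):
+ *
+ * ```
+ * var out bytes.Buffer
+ * cmd := exec.Command("ls", "-l")
+ * cmd.Dir = "/tmp"
+ * cmd.Env = append(os.Environ(), "FOO=bar")
+ * cmd.Stdout = &out
+ * if err := cmd.Run(); err != nil {
+ *     log.Fatal(err)
+ * }
+ * fmt.Print(out.String())
+ * ```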
*/
- jsonPretty(code: number, i: {
- }, indent: string): void
+ stdout: io.Writer
+ stderr: io.Writer
/**
- * JSONBlob sends a JSON blob response with status code.
+ * ExtraFiles specifies additional open files to be inherited by the
+ * new process. It does not include standard input, standard output, or
+ * standard error. If non-nil, entry i becomes file descriptor 3+i.
+ *
+ * ExtraFiles is not supported on Windows.
*/
- jsonBlob(code: number, b: string|Array): void
+ extraFiles: Array<(os.File | undefined)>
/**
- * JSONP sends a JSONP response with status code. It uses `callback` to construct
- * the JSONP payload.
+ * SysProcAttr holds optional, operating system-specific attributes.
+ * Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
*/
- jsonp(code: number, callback: string, i: {
- }): void
+ sysProcAttr?: syscall.SysProcAttr
/**
- * JSONPBlob sends a JSONP blob response with status code. It uses `callback`
- * to construct the JSONP payload.
+ * Process is the underlying process, once started.
*/
- jsonpBlob(code: number, callback: string, b: string|Array): void
+ process?: os.Process
/**
- * XML sends an XML response with status code.
+ * ProcessState contains information about an exited process.
+ * If the process was started successfully, Wait or Run will
+ * populate its ProcessState when the command completes.
*/
- xml(code: number, i: {
- }): void
+ processState?: os.ProcessState
+ err: Error // LookPath error, if any.
/**
- * XMLPretty sends a pretty-print XML with status code.
+ * If Cancel is non-nil, the command must have been created with
+ * CommandContext and Cancel will be called when the command's
+ * Context is done. By default, CommandContext sets Cancel to
+ * call the Kill method on the command's Process.
+ *
+ * Typically a custom Cancel will send a signal to the command's
+ * Process, but it may instead take other actions to initiate cancellation,
+ * such as closing a stdin or stdout pipe or sending a shutdown request on a
+ * network socket.
+ *
+ * If the command exits with a success status after Cancel is
+ * called, and Cancel does not return an error equivalent to
+ * os.ErrProcessDone, then Wait and similar methods will return a non-nil
+ * error: either an error wrapping the one returned by Cancel,
+ * or the error from the Context.
+ * (If the command exits with a non-success status, or Cancel
+ * returns an error that wraps os.ErrProcessDone, Wait and similar methods
+ * continue to return the command's usual exit status.)
+ *
+ * If Cancel is set to nil, nothing will happen immediately when the command's
+ * Context is done, but a nonzero WaitDelay will still take effect. That may
+ * be useful, for example, to work around deadlocks in commands that do not
+ * support shutdown signals but are expected to always finish quickly.
+ *
+ * Cancel will not be called if Start returns a non-nil error.
*/
- xmlPretty(code: number, i: {
- }, indent: string): void
+ cancel: () => void
/**
- * XMLBlob sends an XML blob response with status code.
- */
- xmlBlob(code: number, b: string|Array): void
- /**
- * Blob sends a blob response with status code and content type.
- */
- blob(code: number, contentType: string, b: string|Array): void
- /**
- * Stream sends a streaming response with status code and content type.
+ * If WaitDelay is non-zero, it bounds the time spent waiting on two sources
+ * of unexpected delay in Wait: a child process that fails to exit after the
+ * associated Context is canceled, and a child process that exits but leaves
+ * its I/O pipes unclosed.
+ *
+ * The WaitDelay timer starts when either the associated Context is done or a
+ * call to Wait observes that the child process has exited, whichever occurs
+ * first. When the delay has elapsed, the command shuts down the child process
+ * and/or its I/O pipes.
+ *
+ * If the child process has failed to exit — perhaps because it ignored or
+ * failed to receive a shutdown signal from a Cancel function, or because no
+ * Cancel function was set — then it will be terminated using os.Process.Kill.
+ *
+ * Then, if the I/O pipes communicating with the child process are still open,
+ * those pipes are closed in order to unblock any goroutines currently blocked
+ * on Read or Write calls.
+ *
+ * If pipes are closed due to WaitDelay, no Cancel call has occurred,
+ * and the command has otherwise exited with a successful status, Wait and
+ * similar methods will return ErrWaitDelay instead of nil.
+ *
+ * If WaitDelay is zero (the default), I/O pipes will be read until EOF,
+ * which might not occur until orphaned subprocesses of the command have
+ * also closed their descriptors for the pipes.
*/
- stream(code: number, contentType: string, r: io.Reader): void
+ waitDelay: time.Duration
+ }
+ interface Cmd {
/**
- * File sends a response with the content of the file.
+ * String returns a human-readable description of c.
+ * It is intended only for debugging.
+ * In particular, it is not suitable for use as input to a shell.
+ * The output of String may vary across Go releases.
*/
- file(file: string): void
+ string(): string
+ }
+ interface Cmd {
/**
- * FileFS sends a response with the content of the file from given filesystem.
+ * Run starts the specified command and waits for it to complete.
+ *
+ * The returned error is nil if the command runs, has no problems
+ * copying stdin, stdout, and stderr, and exits with a zero exit
+ * status.
+ *
+ * If the command starts but does not complete successfully, the error is of
+ * type *ExitError. Other error types may be returned for other situations.
+ *
+ * If the calling goroutine has locked the operating system thread
+ * with runtime.LockOSThread and modified any inheritable OS-level
+ * thread state (for example, Linux or Plan 9 name spaces), the new
+ * process will inherit the caller's thread state.
*/
- fileFS(file: string, filesystem: fs.FS): void
+ run(): void
+ }
+ interface Cmd {
/**
- * Attachment sends a response as attachment, prompting client to save the
- * file.
+ * Start starts the specified command but does not wait for it to complete.
+ *
+ * If Start returns successfully, the c.Process field will be set.
+ *
+ * After a successful call to Start the Wait method must be called in
+ * order to release associated system resources.
*/
- attachment(file: string, name: string): void
+ start(): void
+ }
+ interface Cmd {
/**
- * Inline sends a response as inline, opening the file in the browser.
+ * Wait waits for the command to exit and waits for any copying to
+ * stdin or copying from stdout or stderr to complete.
+ *
+ * The command must have been started by Start.
+ *
+ * The returned error is nil if the command runs, has no problems
+ * copying stdin, stdout, and stderr, and exits with a zero exit
+ * status.
+ *
+ * If the command fails to run or doesn't complete successfully, the
+ * error is of type *ExitError. Other error types may be
+ * returned for I/O problems.
+ *
+ * If any of c.Stdin, c.Stdout or c.Stderr are not an *os.File, Wait also waits
+ * for the respective I/O loop copying to or from the process to complete.
+ *
+ * Wait releases any resources associated with the Cmd.
*/
- inline(file: string, name: string): void
+ wait(): void
+ }
+ interface Cmd {
/**
- * NoContent sends a response with no body and a status code.
+ * Output runs the command and returns its standard output.
+ * Any returned error will usually be of type *ExitError.
+ * If c.Stderr was nil, Output populates ExitError.Stderr.
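+ *
+ * For example:
+ *
+ * ```
+ * out, err := exec.Command("date").Output()
+ * if err != nil {
+ *     log.Fatal(err)
+ * }
+ * fmt.Printf("The date is %s\n", out)
+ * ```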
*/
- noContent(code: number): void
+ output(): string|Array
+ }
+ interface Cmd {
/**
- * Redirect redirects the request to a provided URL with status code.
+ * CombinedOutput runs the command and returns its combined standard
+ * output and standard error.
*/
- redirect(code: number, url: string): void
+ combinedOutput(): string|Array
+ }
+ interface Cmd {
/**
- * Error invokes the registered global HTTP error handler. Generally used by middleware.
- * A side-effect of calling global error handler is that now Response has been committed (sent to the client) and
- * middlewares up in chain can not change Response status code or Response body anymore.
- *
- * Avoid using this method in handlers as no middleware will be able to effectively handle errors after that.
- * Instead of calling this method in handler return your error and let it be handled by middlewares or global error handler.
+ * StdinPipe returns a pipe that will be connected to the command's
+ * standard input when the command starts.
+ * The pipe will be closed automatically after Wait sees the command exit.
+ * A caller need only call Close to force the pipe to close sooner.
+ * For example, if the command being run will not exit until standard input
+ * is closed, the caller must close the pipe.
*/
- error(err: Error): void
+ stdinPipe(): io.WriteCloser
+ }
+ interface Cmd {
/**
- * Echo returns the `Echo` instance.
+ * StdoutPipe returns a pipe that will be connected to the command's
+ * standard output when the command starts.
*
- * WARNING: Remember that Echo public fields and methods are coroutine safe ONLY when you are NOT mutating them
- * anywhere in your code after Echo server has started.
+ * Wait will close the pipe after seeing the command exit, so most callers
+ * need not close the pipe themselves. It is thus incorrect to call Wait
+ * before all reads from the pipe have completed.
+ * For the same reason, it is incorrect to call Run when using StdoutPipe.
+ * See the example for idiomatic usage.
*/
- echo(): (Echo)
+ stdoutPipe(): io.ReadCloser
}
- // @ts-ignore
- import stdContext = context
- /**
- * Echo is the top-level framework instance.
- *
- * Goroutine safety: Do not mutate Echo instance fields after server has started. Accessing these
- * fields from handlers/middlewares and changing field values at the same time leads to data-races.
- * Same rule applies to adding new routes after server has been started - Adding a route is not Goroutine safe action.
- */
- interface Echo {
- /**
- * NewContextFunc allows using custom context implementations, instead of default *echo.context
- */
- newContextFunc: (e: Echo, pathParamAllocSize: number) => ServableContext
- debug: boolean
- httpErrorHandler: HTTPErrorHandler
- binder: Binder
- jsonSerializer: JSONSerializer
- validator: Validator
- renderer: Renderer
- logger: Logger
- ipExtractor: IPExtractor
+ interface Cmd {
/**
- * Filesystem is file system used by Static and File handlers to access files.
- * Defaults to os.DirFS(".")
+ * StderrPipe returns a pipe that will be connected to the command's
+ * standard error when the command starts.
*
- * When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
- * prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
- * including `assets/images` as their prefix.
+ * Wait will close the pipe after seeing the command exit, so most callers
+ * need not close the pipe themselves. It is thus incorrect to call Wait
+ * before all reads from the pipe have completed.
+ * For the same reason, it is incorrect to use Run when using StderrPipe.
+ * See the StdoutPipe example for idiomatic usage.
*/
- filesystem: fs.FS
+ stderrPipe(): io.ReadCloser
+ }
+ interface Cmd {
/**
- * OnAddRoute is called when Echo adds new route to specific host router. Handler is called for every router
- * and before route is added to the host router.
+ * Environ returns a copy of the environment in which the command would be run
+ * as it is currently configured.
*/
- onAddRoute: (host: string, route: Routable) => void
+ environ(): Array
}
+}
+
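To ground the `Cmd` methods above in the JSVM context these typings target, here is a minimal sketch, assuming the command is constructed through an `$os.cmd()` binding (a thin wrapper around `exec.Command`); if your build exposes command construction differently, treat that call as a placeholder:

```
// Minimal sketch: run an external program and capture its output.
// Assumes $os.cmd() wraps exec.Command in the JSVM.
const cmd = $os.cmd("git", "status", "--porcelain");

// combinedOutput() runs the command and returns stdout+stderr (as bytes);
// it throws if the command exits with a non-zero status.
// Note: a Cmd cannot be reused after run()/output()/combinedOutput().
const out = cmd.combinedOutput();
console.log("output length:", out.length);
```

For fire-and-forget execution `run()` alone is enough, while `start()`/`wait()` split the same lifecycle into a non-blocking launch and an explicit join.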
+/**
+ * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+ *
+ * See README.md for more info.
+ */
+namespace jwt {
/**
- * HandlerFunc defines a function to serve HTTP requests.
- */
- interface HandlerFunc {(c: Context): void }
- /**
- * MiddlewareFunc defines a function to process middleware.
+ * MapClaims is a claims type that uses the map[string]interface{} for JSON decoding.
+ * This is the default claims type if you don't supply one
*/
- interface MiddlewareFunc {(next: HandlerFunc): HandlerFunc }
- interface Echo {
+ interface MapClaims extends _TygojaDict{}
+ interface MapClaims {
/**
- * NewContext returns a new Context instance.
- *
- * Note: both request and response can be left to nil as Echo.ServeHTTP will call c.Reset(req,resp) anyway
- * these arguments are useful when creating context for tests and cases like that.
+	 * VerifyAudience compares the aud claim against cmp.
+	 * If required is false, this method will return true if the value matches or is unset.
*/
- newContext(r: http.Request, w: http.ResponseWriter): Context
+ verifyAudience(cmp: string, req: boolean): boolean
}
- interface Echo {
+ interface MapClaims {
/**
- * Router returns the default router.
+ * VerifyExpiresAt compares the exp claim against cmp (cmp <= exp).
+	 * If req is false, it will return true if exp is unset.
*/
- router(): Router
+ verifyExpiresAt(cmp: number, req: boolean): boolean
}
- interface Echo {
+ interface MapClaims {
/**
- * Routers returns the new map of host => router.
+	 * VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+	 * If req is false, it will return true if iat is unset.
*/
- routers(): _TygojaDict
+ verifyIssuedAt(cmp: number, req: boolean): boolean
}
- interface Echo {
+ interface MapClaims {
/**
- * RouterFor returns Router for given host. When host is left empty the default router is returned.
+ * VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+	 * If req is false, it will return true if nbf is unset.
*/
- routerFor(host: string): [Router, boolean]
+ verifyNotBefore(cmp: number, req: boolean): boolean
}
- interface Echo {
+ interface MapClaims {
/**
- * ResetRouterCreator resets callback for creating new router instances.
- * Note: current (default) router is immediately replaced with router created with creator func and vhost routers are cleared.
+ * VerifyIssuer compares the iss claim against cmp.
+ * If required is false, this method will return true if the value matches or is unset
*/
- resetRouterCreator(creator: (e: Echo) => Router): void
+ verifyIssuer(cmp: string, req: boolean): boolean
}
- interface Echo {
+ interface MapClaims {
/**
- * Pre adds middleware to the chain which is run before router tries to find matching route.
- * Meaning middleware is executed even for 404 (not found) cases.
+	 * Valid validates the time-based claims "exp", "iat" and "nbf".
+	 * There is no accounting for clock skew.
+	 * If any of the above claims are missing from the token, the claims are
+	 * still considered valid.
*/
- pre(...middleware: MiddlewareFunc[]): void
+ valid(): void
}
- interface Echo {
- /**
- * Use adds middleware to the chain which is run after router has found matching route and before route/request handler method is executed.
- */
- use(...middleware: MiddlewareFunc[]): void
+}
+
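As a rough illustration of the claim helpers, here is a sketch that decodes a token without verifying its signature; `$security.parseUnverifiedJWT()` is an assumption about how the claims map would be obtained:

```
// Sketch: sanity-check time and issuer claims of a decoded token.
// parseUnverifiedJWT() only decodes - it does NOT verify the signature.
function checkClaims(token) {
    const claims = $security.parseUnverifiedJWT(token); // assumed binding
    const nowUnix = Math.floor(Date.now() / 1000);

    // verifyExpiresAt(cmp, req): true when cmp <= exp (or exp is unset and req is false)
    // verifyIssuer(cmp, req):    true when the iss claim matches cmp
    return claims.verifyExpiresAt(nowUnix, true) &&
        claims.verifyIssuer("https://example.com", false);
}
```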
+/**
+ * Package blob provides an easy and portable way to interact with blobs
+ * within a storage location. Subpackages contain driver implementations of
+ * blob for supported services.
+ *
+ * See https://gocloud.dev/howto/blob/ for a detailed how-to guide.
+ *
+ * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with
+ * functions in that package.
+ *
+ * # Errors
+ *
+ * The errors returned from this package can be inspected in several ways:
+ *
+ * The Code function from gocloud.dev/gcerrors will return an error code, also
+ * defined in that package, when invoked on an error.
+ *
+ * The Bucket.ErrorAs method can retrieve the driver error underlying the returned
+ * error.
+ *
+ * # OpenCensus Integration
+ *
+ * OpenCensus supports tracing and metric collection for multiple languages and
+ * backend providers. See https://opencensus.io.
+ *
+ * This API collects OpenCensus traces and metrics for the following methods:
+ * ```
+ * - Attributes
+ * - Copy
+ * - Delete
+ * - ListPage
+ * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll
+ * are included because they call NewRangeReader.)
+ * - NewWriter, from creation until the call to Close.
+ * ```
+ *
+ * All trace and metric names begin with the package import path.
+ * The traces add the method name.
+ * For example, "gocloud.dev/blob/Attributes".
+ * The metrics are "completed_calls", a count of completed method calls by driver,
+ * method and status (error code); and "latency", a distribution of method latency
+ * by driver and method.
+ * For example, "gocloud.dev/blob/latency".
+ *
+ * It also collects the following metrics:
+ * ```
+ * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver.
+ * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver.
+ * ```
+ *
+ * To enable trace collection in your application, see "Configure Exporter" at
+ * https://opencensus.io/quickstart/go/tracing.
+ * To enable metric collection in your application, see "Exporting stats" at
+ * https://opencensus.io/quickstart/go/metrics.
+ */
+namespace blob {
+ /**
+ * Reader reads bytes from a blob.
+ * It implements io.ReadSeekCloser, and must be closed after
+ * reads are finished.
+ */
+ interface Reader {
}
- interface Echo {
+ interface Reader {
/**
- * CONNECT registers a new CONNECT route for a path with matching handler in the
- * router with optional route-level middleware. Panics on error.
+ * Read implements io.Reader (https://golang.org/pkg/io/#Reader).
*/
- connect(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ read(p: string|Array): number
}
- interface Echo {
+ interface Reader {
/**
- * DELETE registers a new DELETE route for a path with matching handler in the router
- * with optional route-level middleware. Panics on error.
+ * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker).
*/
- delete(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ seek(offset: number, whence: number): number
}
- interface Echo {
+ interface Reader {
/**
- * GET registers a new GET route for a path with matching handler in the router
- * with optional route-level middleware. Panics on error.
+ * Close implements io.Closer (https://golang.org/pkg/io/#Closer).
*/
- get(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ close(): void
}
- interface Echo {
+ interface Reader {
/**
- * HEAD registers a new HEAD route for a path with matching handler in the
- * router with optional route-level middleware. Panics on error.
+ * ContentType returns the MIME type of the blob.
*/
- head(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ contentType(): string
}
- interface Echo {
+ interface Reader {
/**
- * OPTIONS registers a new OPTIONS route for a path with matching handler in the
- * router with optional route-level middleware. Panics on error.
+ * ModTime returns the time the blob was last modified.
*/
- options(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ modTime(): time.Time
}
- interface Echo {
+ interface Reader {
/**
- * PATCH registers a new PATCH route for a path with matching handler in the
- * router with optional route-level middleware. Panics on error.
+ * Size returns the size of the blob content in bytes.
*/
- patch(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ size(): number
}
- interface Echo {
+ interface Reader {
/**
- * POST registers a new POST route for a path with matching handler in the
- * router with optional route-level middleware. Panics on error.
+ * As converts i to driver-specific types.
+ * See https://gocloud.dev/concepts/as/ for background information, the "As"
+ * examples in this package for examples, and the driver package
+ * documentation for the specific types supported for that driver.
*/
- post(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ as(i: {
+ }): boolean
}
- interface Echo {
+ interface Reader {
/**
- * PUT registers a new PUT route for a path with matching handler in the
- * router with optional route-level middleware. Panics on error.
+ * WriteTo reads from r and writes to w until there's no more data or
+ * an error occurs.
+ * The return value is the number of bytes written to w.
+ *
+ * It implements the io.WriterTo interface.
*/
- put(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
+ writeTo(w: io.Writer): number
}
- interface Echo {
+ /**
+ * Attributes contains attributes about a blob.
+ */
+ interface Attributes {
/**
- * TRACE registers a new TRACE route for a path with matching handler in the
- * router with optional route-level middleware. Panics on error.
+ * CacheControl specifies caching attributes that services may use
+ * when serving the blob.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
*/
- trace(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
- }
- interface Echo {
+ cacheControl: string
/**
- * RouteNotFound registers a special-case route which is executed when no other route is found (i.e. HTTP 404 cases)
- * for current request URL.
- * Path supports static and named/any parameters just like other http method is defined. Generally path is ended with
- * wildcard/match-any character (`/*`, `/download/*` etc).
- *
- * Example: `e.RouteNotFound("/*", func(c echo.Context) error { return c.NoContent(http.StatusNotFound) })`
+ * ContentDisposition specifies whether the blob content is expected to be
+ * displayed inline or as an attachment.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
*/
- routeNotFound(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
- }
- interface Echo {
+ contentDisposition: string
/**
- * Any registers a new route for all HTTP methods (supported by Echo) and path with matching handler
- * in the router with optional route-level middleware.
- *
- * Note: this method only adds specific set of supported HTTP methods as handler and is not true
- * "catch-any-arbitrary-method" way of matching requests.
+ * ContentEncoding specifies the encoding used for the blob's content, if any.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
*/
- any(path: string, handler: HandlerFunc, ...middleware: MiddlewareFunc[]): Routes
- }
- interface Echo {
+ contentEncoding: string
/**
- * Match registers a new route for multiple HTTP methods and path with matching
- * handler in the router with optional route-level middleware. Panics on error.
+ * ContentLanguage specifies the language used in the blob's content, if any.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
*/
- match(methods: Array, path: string, handler: HandlerFunc, ...middleware: MiddlewareFunc[]): Routes
- }
- interface Echo {
+ contentLanguage: string
/**
- * Static registers a new route with path prefix to serve static files from the provided root directory.
+ * ContentType is the MIME type of the blob. It will not be empty.
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
*/
- static(pathPrefix: string): RouteInfo
- }
- interface Echo {
+ contentType: string
/**
- * StaticFS registers a new route with path prefix to serve static files from the provided file system.
- *
- * When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
- * prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
- * including `assets/images` as their prefix.
+ * Metadata holds key/value pairs associated with the blob.
+ * Keys are guaranteed to be in lowercase, even if the backend service
+ * has case-sensitive keys (although note that Metadata written via
+ * this package will always be lowercased). If there are duplicate
+ * case-insensitive keys (e.g., "foo" and "FOO"), only one value
+ * will be kept, and it is undefined which one.
*/
- staticFS(pathPrefix: string, filesystem: fs.FS): RouteInfo
- }
- interface Echo {
+ metadata: _TygojaDict
/**
- * FileFS registers a new route with path to serve file from the provided file system.
+ * CreateTime is the time the blob was created, if available. If not available,
+ * CreateTime will be the zero time.
*/
- fileFS(path: string, filesystem: fs.FS, ...m: MiddlewareFunc[]): RouteInfo
- }
- interface Echo {
+ createTime: time.Time
/**
- * File registers a new route with path to serve a static file with optional route-level middleware. Panics on error.
+ * ModTime is the time the blob was last modified.
*/
- file(path: string, ...middleware: MiddlewareFunc[]): RouteInfo
- }
- interface Echo {
+ modTime: time.Time
/**
- * AddRoute registers a new Route with default host Router
+ * Size is the size of the blob's content in bytes.
*/
- addRoute(route: Routable): RouteInfo
- }
- interface Echo {
+ size: number
/**
- * Add registers a new route for an HTTP method and path with matching handler
- * in the router with optional route-level middleware.
+ * MD5 is an MD5 hash of the blob contents or nil if not available.
*/
- add(method: string, handler: HandlerFunc, ...middleware: MiddlewareFunc[]): RouteInfo
- }
- interface Echo {
+ md5: string|Array
/**
- * Host creates a new router group for the provided host and optional host-level middleware.
+ * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag.
*/
- host(name: string, ...m: MiddlewareFunc[]): (Group)
+ eTag: string
}
- interface Echo {
+ interface Attributes {
/**
- * Group creates a new router group with prefix and optional group-level middleware.
+ * As converts i to driver-specific types.
+ * See https://gocloud.dev/concepts/as/ for background information, the "As"
+ * examples in this package for examples, and the driver package
+ * documentation for the specific types supported for that driver.
*/
- group(prefix: string, ...m: MiddlewareFunc[]): (Group)
+ as(i: {
+ }): boolean
}
- interface Echo {
+ /**
+ * ListObject represents a single blob returned from List.
+ */
+ interface ListObject {
/**
- * AcquireContext returns an empty `Context` instance from the pool.
- * You must return the context by calling `ReleaseContext()`.
+ * Key is the key for this blob.
*/
- acquireContext(): Context
- }
- interface Echo {
+ key: string
/**
- * ReleaseContext returns the `Context` instance back to the pool.
- * You must call it after `AcquireContext()`.
+ * ModTime is the time the blob was last modified.
*/
- releaseContext(c: Context): void
- }
- interface Echo {
+ modTime: time.Time
/**
- * ServeHTTP implements `http.Handler` interface, which serves HTTP requests.
+ * Size is the size of the blob's content in bytes.
*/
- serveHTTP(w: http.ResponseWriter, r: http.Request): void
- }
- interface Echo {
+ size: number
/**
- * Start stars HTTP server on given address with Echo as a handler serving requests. The server can be shutdown by
- * sending os.Interrupt signal with `ctrl+c`.
- *
- * Note: this method is created for use in examples/demos and is deliberately simple without providing configuration
- * options.
- *
- * In need of customization use:
- *
- * ```
- * sc := echo.StartConfig{Address: ":8080"}
- * if err := sc.Start(e); err != http.ErrServerClosed {
- * log.Fatal(err)
- * }
- * ```
- *
- * // or standard library `http.Server`
- *
- * ```
- * s := http.Server{Addr: ":8080", Handler: e}
- * if err := s.ListenAndServe(); err != http.ErrServerClosed {
- * log.Fatal(err)
- * }
- * ```
+ * MD5 is an MD5 hash of the blob contents or nil if not available.
*/
- start(address: string): void
+ md5: string|Array
+ /**
+ * IsDir indicates that this result represents a "directory" in the
+ * hierarchical namespace, ending in ListOptions.Delimiter. Key can be
+ * passed as ListOptions.Prefix to list items in the "directory".
+ * Fields other than Key and IsDir will not be set if IsDir is true.
+ */
+ isDir: boolean
+ }
+ interface ListObject {
+ /**
+ * As converts i to driver-specific types.
+ * See https://gocloud.dev/concepts/as/ for background information, the "As"
+ * examples in this package for examples, and the driver package
+ * documentation for the specific types supported for that driver.
+ */
+ as(i: {
+ }): boolean
}
}
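For orientation, a hedged sketch of consuming a `Reader`: how the reader is obtained (`$app.newFilesystem()` and `getFile()`) is an assumption about the surrounding bindings, while the attribute and `close()` calls are the ones declared above:

```
// Sketch: open a stored blob and inspect it via blob.Reader.
// $app.newFilesystem() / fsys.getFile() are assumptions; the file key is hypothetical.
const fsys = $app.newFilesystem();
try {
    const reader = fsys.getFile("collectionId/recordId/example.png");
    try {
        console.log(reader.contentType(), reader.size(), reader.modTime());
    } finally {
        reader.close(); // a Reader must be closed once reads are finished
    }
} finally {
    fsys.close();
}
```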
/**
- * Package exec runs external commands. It wraps os.StartProcess to make it
- * easier to remap stdin and stdout, connect I/O with pipes, and do other
- * adjustments.
- *
- * Unlike the "system" library call from C and other languages, the
- * os/exec package intentionally does not invoke the system shell and
- * does not expand any glob patterns or handle other expansions,
- * pipelines, or redirections typically done by shells. The package
- * behaves more like C's "exec" family of functions. To expand glob
- * patterns, either call the shell directly, taking care to escape any
- * dangerous input, or use the path/filepath package's Glob function.
- * To expand environment variables, use package os's ExpandEnv.
- *
- * Note that the examples in this package assume a Unix system.
- * They may not run on Windows, and they do not run in the Go Playground
- * used by golang.org and godoc.org.
- *
- * # Executables in the current directory
- *
- * The functions Command and LookPath look for a program
- * in the directories listed in the current path, following the
- * conventions of the host operating system.
- * Operating systems have for decades included the current
- * directory in this search, sometimes implicitly and sometimes
- * configured explicitly that way by default.
- * Modern practice is that including the current directory
- * is usually unexpected and often leads to security problems.
- *
- * To avoid those security problems, as of Go 1.19, this package will not resolve a program
- * using an implicit or explicit path entry relative to the current directory.
- * That is, if you run exec.LookPath("go"), it will not successfully return
- * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured.
- * Instead, if the usual path algorithms would result in that answer,
- * these functions return an error err satisfying errors.Is(err, ErrDot).
- *
- * For example, consider these two program snippets:
- *
- * ```
- * path, err := exec.LookPath("prog")
- * if err != nil {
- * log.Fatal(err)
- * }
- * use(path)
- * ```
- *
- * and
- *
- * ```
- * cmd := exec.Command("prog")
- * if err := cmd.Run(); err != nil {
- * log.Fatal(err)
- * }
- * ```
- *
- * These will not find and run ./prog or .\prog.exe,
- * no matter how the current path is configured.
- *
- * Code that always wants to run a program from the current directory
- * can be rewritten to say "./prog" instead of "prog".
- *
- * Code that insists on including results from relative path entries
- * can instead override the error using an errors.Is check:
- *
- * ```
- * path, err := exec.LookPath("prog")
- * if errors.Is(err, exec.ErrDot) {
- * err = nil
- * }
- * if err != nil {
- * log.Fatal(err)
- * }
- * use(path)
- * ```
- *
- * and
- *
- * ```
- * cmd := exec.Command("prog")
- * if errors.Is(cmd.Err, exec.ErrDot) {
- * cmd.Err = nil
- * }
- * if err := cmd.Run(); err != nil {
- * log.Fatal(err)
- * }
- * ```
- *
- * Setting the environment variable GODEBUG=execerrdot=0
- * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19
- * behavior for programs that are unable to apply more targeted fixes.
- * A future version of Go may remove support for this variable.
- *
- * Before adding such overrides, make sure you understand the
- * security implications of doing so.
- * See https://go.dev/blog/path-security for more information.
+ * Package types implements some commonly used db serializable types
+ * like datetime, json, etc.
*/
-namespace exec {
+namespace types {
/**
- * Cmd represents an external command being prepared or run.
- *
- * A Cmd cannot be reused after calling its Run, Output or CombinedOutput
- * methods.
+ * JsonArray defines a slice that is safe for json and db read/write.
*/
- interface Cmd {
- /**
- * Path is the path of the command to run.
- *
- * This is the only field that must be set to a non-zero
- * value. If Path is relative, it is evaluated relative
- * to Dir.
- */
- path: string
+ interface JsonArray extends Array{}
+ interface JsonArray {
/**
- * Args holds command line arguments, including the command as Args[0].
- * If the Args field is empty or nil, Run uses {Path}.
- *
- * In typical use, both Path and Args are set by calling Command.
+ * MarshalJSON implements the [json.Marshaler] interface.
*/
- args: Array
+ marshalJSON(): string|Array
+ }
+ interface JsonArray {
/**
- * Env specifies the environment of the process.
- * Each entry is of the form "key=value".
- * If Env is nil, the new process uses the current process's
- * environment.
- * If Env contains duplicate environment keys, only the last
- * value in the slice for each duplicate key is used.
- * As a special case on Windows, SYSTEMROOT is always added if
- * missing and not explicitly set to the empty string.
+ * Value implements the [driver.Valuer] interface.
*/
- env: Array
+ value(): any
+ }
+ interface JsonArray {
/**
- * Dir specifies the working directory of the command.
- * If Dir is the empty string, Run runs the command in the
- * calling process's current directory.
+ * Scan implements [sql.Scanner] interface to scan the provided value
+ * into the current JsonArray[T] instance.
*/
- dir: string
+ scan(value: any): void
+ }
+ /**
+ * JsonMap defines a map that is safe for json and db read/write.
+ */
+ interface JsonMap extends _TygojaDict{}
+ interface JsonMap {
/**
- * Stdin specifies the process's standard input.
- *
- * If Stdin is nil, the process reads from the null device (os.DevNull).
- *
- * If Stdin is an *os.File, the process's standard input is connected
- * directly to that file.
- *
- * Otherwise, during the execution of the command a separate
- * goroutine reads from Stdin and delivers that data to the command
- * over a pipe. In this case, Wait does not complete until the goroutine
- * stops copying, either because it has reached the end of Stdin
- * (EOF or a read error), or because writing to the pipe returned an error,
- * or because a nonzero WaitDelay was set and expired.
+ * MarshalJSON implements the [json.Marshaler] interface.
*/
- stdin: io.Reader
+ marshalJSON(): string|Array
+ }
+ interface JsonMap {
/**
- * Stdout and Stderr specify the process's standard output and error.
- *
- * If either is nil, Run connects the corresponding file descriptor
- * to the null device (os.DevNull).
- *
- * If either is an *os.File, the corresponding output from the process
- * is connected directly to that file.
- *
- * Otherwise, during the execution of the command a separate goroutine
- * reads from the process over a pipe and delivers that data to the
- * corresponding Writer. In this case, Wait does not complete until the
- * goroutine reaches EOF or encounters an error or a nonzero WaitDelay
- * expires.
+ * Get retrieves a single value from the current JsonMap.
*
- * If Stdout and Stderr are the same writer, and have a type that can
- * be compared with ==, at most one goroutine at a time will call Write.
+ * This helper was added primarily to assist the goja integration since custom map types
+ * don't have direct access to the map keys (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods).
*/
- stdout: io.Writer
- stderr: io.Writer
+ get(key: string): any
+ }
+ interface JsonMap {
/**
- * ExtraFiles specifies additional open files to be inherited by the
- * new process. It does not include standard input, standard output, or
- * standard error. If non-nil, entry i becomes file descriptor 3+i.
+ * Set sets a single value in the current JsonMap.
*
- * ExtraFiles is not supported on Windows.
+ * This helper was added primarily to assist the goja integration since custom map types
+ * don't have direct access to the map keys (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods).
*/
- extraFiles: Array<(os.File | undefined)>
+ set(key: string, value: any): void
+ }
+ interface JsonMap {
/**
- * SysProcAttr holds optional, operating system-specific attributes.
- * Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
+ * Value implements the [driver.Valuer] interface.
*/
- sysProcAttr?: syscall.SysProcAttr
+ value(): any
+ }
+ interface JsonMap {
/**
- * Process is the underlying process, once started.
+ * Scan implements [sql.Scanner] interface to scan the provided value
+ * into the current `JsonMap` instance.
*/
- process?: os.Process
+ scan(value: any): void
+ }
+}
+
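Because custom Go map types have no direct key access under goja, the `get`/`set` helpers are the intended way to touch a `JsonMap` from JS. A small sketch, assuming `collection` is a loaded models.Collection (whose `options` field, declared further below, is a `JsonMap`) and `allowEmailAuth` is just an example key:

```
// Sketch: read/write JsonMap values through the helpers
// instead of direct property access (not supported for Go map types).
const options = collection.options; // assumed: a loaded models.Collection

const current = options.get("allowEmailAuth"); // example key
options.set("allowEmailAuth", true);
console.log(current);
```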
+/**
+ * Package schema implements custom Schema and SchemaField datatypes
+ * for handling the Collection schema definitions.
+ */
+namespace schema {
+ // @ts-ignore
+ import validation = ozzo_validation
+ /**
+ * Schema defines a dynamic db schema as a slice of `SchemaField`s.
+ */
+ interface Schema {
+ }
+ interface Schema {
/**
- * ProcessState contains information about an exited process.
- * If the process was started successfully, Wait or Run will
- * populate its ProcessState when the command completes.
+ * Fields returns the registered schema fields.
*/
- processState?: os.ProcessState
- err: Error // LookPath error, if any.
+ fields(): Array<(SchemaField | undefined)>
+ }
+ interface Schema {
/**
- * If Cancel is non-nil, the command must have been created with
- * CommandContext and Cancel will be called when the command's
- * Context is done. By default, CommandContext sets Cancel to
- * call the Kill method on the command's Process.
- *
- * Typically a custom Cancel will send a signal to the command's
- * Process, but it may instead take other actions to initiate cancellation,
- * such as closing a stdin or stdout pipe or sending a shutdown request on a
- * network socket.
- *
- * If the command exits with a success status after Cancel is
- * called, and Cancel does not return an error equivalent to
- * os.ErrProcessDone, then Wait and similar methods will return a non-nil
- * error: either an error wrapping the one returned by Cancel,
- * or the error from the Context.
- * (If the command exits with a non-success status, or Cancel
- * returns an error that wraps os.ErrProcessDone, Wait and similar methods
- * continue to return the command's usual exit status.)
- *
- * If Cancel is set to nil, nothing will happen immediately when the command's
- * Context is done, but a nonzero WaitDelay will still take effect. That may
- * be useful, for example, to work around deadlocks in commands that do not
- * support shutdown signals but are expected to always finish quickly.
- *
- * Cancel will not be called if Start returns a non-nil error.
+ * InitFieldsOptions calls `InitOptions()` for all schema fields.
*/
- cancel: () => void
+ initFieldsOptions(): void
+ }
+ interface Schema {
/**
- * If WaitDelay is non-zero, it bounds the time spent waiting on two sources
- * of unexpected delay in Wait: a child process that fails to exit after the
- * associated Context is canceled, and a child process that exits but leaves
- * its I/O pipes unclosed.
- *
- * The WaitDelay timer starts when either the associated Context is done or a
- * call to Wait observes that the child process has exited, whichever occurs
- * first. When the delay has elapsed, the command shuts down the child process
- * and/or its I/O pipes.
- *
- * If the child process has failed to exit — perhaps because it ignored or
- * failed to receive a shutdown signal from a Cancel function, or because no
- * Cancel function was set — then it will be terminated using os.Process.Kill.
- *
- * Then, if the I/O pipes communicating with the child process are still open,
- * those pipes are closed in order to unblock any goroutines currently blocked
- * on Read or Write calls.
- *
- * If pipes are closed due to WaitDelay, no Cancel call has occurred,
- * and the command has otherwise exited with a successful status, Wait and
- * similar methods will return ErrWaitDelay instead of nil.
- *
- * If WaitDelay is zero (the default), I/O pipes will be read until EOF,
- * which might not occur until orphaned subprocesses of the command have
- * also closed their descriptors for the pipes.
+ * Clone creates a deep clone of the current schema.
*/
- waitDelay: time.Duration
+ clone(): (Schema)
}
- interface Cmd {
+ interface Schema {
/**
- * String returns a human-readable description of c.
- * It is intended only for debugging.
- * In particular, it is not suitable for use as input to a shell.
- * The output of String may vary across Go releases.
+	 * AsMap returns a map with all registered schema fields.
+	 * The returned map is indexed by field name.
*/
- string(): string
+ asMap(): _TygojaDict
}
- interface Cmd {
+ interface Schema {
/**
- * Run starts the specified command and waits for it to complete.
- *
- * The returned error is nil if the command runs, has no problems
- * copying stdin, stdout, and stderr, and exits with a zero exit
- * status.
- *
- * If the command starts but does not complete successfully, the error is of
- * type *ExitError. Other error types may be returned for other situations.
- *
- * If the calling goroutine has locked the operating system thread
- * with runtime.LockOSThread and modified any inheritable OS-level
- * thread state (for example, Linux or Plan 9 name spaces), the new
- * process will inherit the caller's thread state.
+ * GetFieldById returns a single field by its id.
*/
- run(): void
+ getFieldById(id: string): (SchemaField)
}
- interface Cmd {
+ interface Schema {
/**
- * Start starts the specified command but does not wait for it to complete.
- *
- * If Start returns successfully, the c.Process field will be set.
- *
- * After a successful call to Start the Wait method must be called in
- * order to release associated system resources.
+ * GetFieldByName returns a single field by its name.
*/
- start(): void
+ getFieldByName(name: string): (SchemaField)
}
- interface Cmd {
+ interface Schema {
/**
- * Wait waits for the command to exit and waits for any copying to
- * stdin or copying from stdout or stderr to complete.
- *
- * The command must have been started by Start.
- *
- * The returned error is nil if the command runs, has no problems
- * copying stdin, stdout, and stderr, and exits with a zero exit
- * status.
- *
- * If the command fails to run or doesn't complete successfully, the
- * error is of type *ExitError. Other error types may be
- * returned for I/O problems.
- *
- * If any of c.Stdin, c.Stdout or c.Stderr are not an *os.File, Wait also waits
- * for the respective I/O loop copying to or from the process to complete.
+ * RemoveField removes a single schema field by its id.
*
- * Wait releases any resources associated with the Cmd.
+	 * This method does nothing if a field with `id` doesn't exist.
*/
- wait(): void
+ removeField(id: string): void
}
- interface Cmd {
+ interface Schema {
/**
- * Output runs the command and returns its standard output.
- * Any returned error will usually be of type *ExitError.
- * If c.Stderr was nil, Output populates ExitError.Stderr.
+ * AddField registers the provided newField to the current schema.
+ *
+	 * If a field with `newField.Id` already exists, the existing field is
+ * replaced with the new one.
+ *
+ * Otherwise the new field is appended to the other schema fields.
*/
- output(): string|Array
+ addField(newField: SchemaField): void
}
- interface Cmd {
+ interface Schema {
/**
- * CombinedOutput runs the command and returns its combined standard
- * output and standard error.
+ * Validate makes Schema validatable by implementing [validation.Validatable] interface.
+ *
+ * Internally calls each individual field's validator and additionally
+ * checks for invalid renamed fields and field name duplications.
*/
- combinedOutput(): string|Array
+ validate(): void
}
- interface Cmd {
+ interface Schema {
/**
- * StdinPipe returns a pipe that will be connected to the command's
- * standard input when the command starts.
- * The pipe will be closed automatically after Wait sees the command exit.
- * A caller need only call Close to force the pipe to close sooner.
- * For example, if the command being run will not exit until standard input
- * is closed, the caller must close the pipe.
+ * MarshalJSON implements the [json.Marshaler] interface.
*/
- stdinPipe(): io.WriteCloser
+ marshalJSON(): string|Array
}
- interface Cmd {
+ interface Schema {
/**
- * StdoutPipe returns a pipe that will be connected to the command's
- * standard output when the command starts.
+ * UnmarshalJSON implements the [json.Unmarshaler] interface.
*
- * Wait will close the pipe after seeing the command exit, so most callers
- * need not close the pipe themselves. It is thus incorrect to call Wait
- * before all reads from the pipe have completed.
- * For the same reason, it is incorrect to call Run when using StdoutPipe.
- * See the example for idiomatic usage.
+ * On success, all schema field options are auto initialized.
*/
- stdoutPipe(): io.ReadCloser
+ unmarshalJSON(data: string|Array): void
}
- interface Cmd {
+ interface Schema {
/**
- * StderrPipe returns a pipe that will be connected to the command's
- * standard error when the command starts.
- *
- * Wait will close the pipe after seeing the command exit, so most callers
- * need not close the pipe themselves. It is thus incorrect to call Wait
- * before all reads from the pipe have completed.
- * For the same reason, it is incorrect to use Run when using StderrPipe.
- * See the StdoutPipe example for idiomatic usage.
+ * Value implements the [driver.Valuer] interface.
*/
- stderrPipe(): io.ReadCloser
+ value(): any
}
- interface Cmd {
+ interface Schema {
/**
- * Environ returns a copy of the environment in which the command would be run
- * as it is currently configured.
+ * Scan implements [sql.Scanner] interface to scan the provided value
+ * into the current Schema instance.
*/
- environ(): Array
+ scan(value: any): void
}
}
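A short sketch of the `Schema` methods above in the style of a JS migration; the `SchemaField` constructor and the concrete field options are assumptions, so adapt them to the field types your collection actually uses:

```
// Sketch: inspect and extend a collection schema.
// "collection" is assumed to be a loaded models.Collection and
// SchemaField is assumed to be exposed as a JSVM constructor.
const s = collection.schema;

const fields = s.fields();
for (let i = 0; i < fields.length; i++) {
    console.log(fields[i].name); // registered field names
}

// addField() replaces an existing field with the same id,
// otherwise it appends the new field.
s.addField(new SchemaField({
    name: "summary",
    type: "text",
}));

s.validate(); // throws on duplicated or invalid fields
```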
/**
- * Package sql provides a generic interface around SQL (or SQL-like)
- * databases.
- *
- * The sql package must be used in conjunction with a database driver.
- * See https://golang.org/s/sqldrivers for a list of drivers.
- *
- * Drivers that do not support context cancellation will not return until
- * after the query is completed.
- *
- * For usage examples, see the wiki page at
- * https://golang.org/s/sqlwiki.
+ * Package models implements all PocketBase DB models and DTOs.
*/
-namespace sql {
- /**
- * TxOptions holds the transaction options to be used in DB.BeginTx.
- */
- interface TxOptions {
+namespace models {
+ type _subxyKhr = BaseModel
+ interface Admin extends _subxyKhr {
+ avatar: number
+ email: string
+ tokenKey: string
+ passwordHash: string
+ lastResetSentAt: types.DateTime
+ }
+ interface Admin {
/**
- * Isolation is the transaction isolation level.
- * If zero, the driver or database's default level is used.
+ * TableName returns the Admin model SQL table name.
*/
- isolation: IsolationLevel
- readOnly: boolean
- }
- /**
- * DB is a database handle representing a pool of zero or more
- * underlying connections. It's safe for concurrent use by multiple
- * goroutines.
- *
- * The sql package creates and frees connections automatically; it
- * also maintains a free pool of idle connections. If the database has
- * a concept of per-connection state, such state can be reliably observed
- * within a transaction (Tx) or connection (Conn). Once DB.Begin is called, the
- * returned Tx is bound to a single connection. Once Commit or
- * Rollback is called on the transaction, that transaction's
- * connection is returned to DB's idle connection pool. The pool size
- * can be controlled with SetMaxIdleConns.
- */
- interface DB {
+ tableName(): string
}
- interface DB {
+ interface Admin {
/**
- * PingContext verifies a connection to the database is still alive,
- * establishing a connection if necessary.
+ * ValidatePassword validates a plain password against the model's password.
*/
- pingContext(ctx: context.Context): void
+ validatePassword(password: string): boolean
}
- interface DB {
+ interface Admin {
/**
- * Ping verifies a connection to the database is still alive,
- * establishing a connection if necessary.
+	 * SetPassword sets a cryptographically secure string to `model.Password`.
*
- * Ping uses context.Background internally; to specify the context, use
- * PingContext.
+	 * Additionally, this method resets the LastResetSentAt and the TokenKey fields.
*/
- ping(): void
+ setPassword(password: string): void
}
- interface DB {
+ interface Admin {
/**
- * Close closes the database and prevents new queries from starting.
- * Close then waits for all queries that have started processing on the server
- * to finish.
- *
- * It is rare to Close a DB, as the DB handle is meant to be
- * long-lived and shared between many goroutines.
+	 * RefreshTokenKey generates and sets a new random token key.
*/
- close(): void
+ refreshTokenKey(): void
}
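For illustration, a sketch of the `Admin` helpers as they tend to appear in JS migrations; `new Admin()` and `$app.dao().saveAdmin()` are assumed to be available in the JSVM:

```
// Sketch: create an admin and verify a password.
const admin = new Admin();          // assumed JSVM constructor
admin.email = "test@example.com";
admin.setPassword("changeme123");   // also resets tokenKey and lastResetSentAt

$app.dao().saveAdmin(admin);        // assumed dao binding

console.log(admin.validatePassword("changeme123")); // true
console.log(admin.validatePassword("wrong"));       // false
```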
- interface DB {
+ // @ts-ignore
+ import validation = ozzo_validation
+ type _subUyFbk = BaseModel
+ interface Collection extends _subUyFbk {
+ name: string
+ type: string
+ system: boolean
+ schema: schema.Schema
+ indexes: types.JsonArray
/**
- * SetMaxIdleConns sets the maximum number of connections in the idle
- * connection pool.
- *
- * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns,
- * then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.
- *
- * If n <= 0, no idle connections are retained.
- *
- * The default max idle connections is currently 2. This may change in
- * a future release.
+ * rules
*/
- setMaxIdleConns(n: number): void
+ listRule?: string
+ viewRule?: string
+ createRule?: string
+ updateRule?: string
+ deleteRule?: string
+ options: types.JsonMap
}
- interface DB {
+ interface Collection {
/**
- * SetMaxOpenConns sets the maximum number of open connections to the database.
- *
- * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than
- * MaxIdleConns, then MaxIdleConns will be reduced to match the new
- * MaxOpenConns limit.
- *
- * If n <= 0, then there is no limit on the number of open connections.
- * The default is 0 (unlimited).
+ * TableName returns the Collection model SQL table name.
*/
- setMaxOpenConns(n: number): void
+ tableName(): string
}
- interface DB {
+ interface Collection {
/**
- * SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
- *
- * Expired connections may be closed lazily before reuse.
- *
- * If d <= 0, connections are not closed due to a connection's age.
+ * BaseFilesPath returns the storage dir path used by the collection.
*/
- setConnMaxLifetime(d: time.Duration): void
+ baseFilesPath(): string
}
- interface DB {
+ interface Collection {
/**
- * SetConnMaxIdleTime sets the maximum amount of time a connection may be idle.
- *
- * Expired connections may be closed lazily before reuse.
- *
- * If d <= 0, connections are not closed due to a connection's idle time.
+ * IsBase checks if the current collection has "base" type.
*/
- setConnMaxIdleTime(d: time.Duration): void
+ isBase(): boolean
}
- interface DB {
+ interface Collection {
/**
- * Stats returns database statistics.
+ * IsAuth checks if the current collection has "auth" type.
*/
- stats(): DBStats
+ isAuth(): boolean
}
- interface DB {
+ interface Collection {
/**
- * PrepareContext creates a prepared statement for later queries or executions.
- * Multiple queries or executions may be run concurrently from the
- * returned statement.
- * The caller must call the statement's Close method
- * when the statement is no longer needed.
- *
- * The provided context is used for the preparation of the statement, not for the
- * execution of the statement.
+ * IsView checks if the current collection has "view" type.
*/
- prepareContext(ctx: context.Context, query: string): (Stmt)
+ isView(): boolean
}
- interface DB {
+ interface Collection {
/**
- * Prepare creates a prepared statement for later queries or executions.
- * Multiple queries or executions may be run concurrently from the
- * returned statement.
- * The caller must call the statement's Close method
- * when the statement is no longer needed.
- *
- * Prepare uses context.Background internally; to specify the context, use
- * PrepareContext.
+ * MarshalJSON implements the [json.Marshaler] interface.
*/
- prepare(query: string): (Stmt)
+ marshalJSON(): string|Array
}
- interface DB {
+ interface Collection {
/**
- * ExecContext executes a query without returning any rows.
- * The args are for any placeholder parameters in the query.
+ * BaseOptions decodes the current collection options and returns them
+	 * as a new [CollectionBaseOptions] instance.
*/
- execContext(ctx: context.Context, query: string, ...args: any[]): Result
+ baseOptions(): CollectionBaseOptions
}
- interface DB {
+ interface Collection {
/**
- * Exec executes a query without returning any rows.
- * The args are for any placeholder parameters in the query.
- *
- * Exec uses context.Background internally; to specify the context, use
- * ExecContext.
+ * AuthOptions decodes the current collection options and returns them
+	 * as a new [CollectionAuthOptions] instance.
*/
- exec(query: string, ...args: any[]): Result
+ authOptions(): CollectionAuthOptions
}
- interface DB {
+ interface Collection {
/**
- * QueryContext executes a query that returns rows, typically a SELECT.
- * The args are for any placeholder parameters in the query.
+ * ViewOptions decodes the current collection options and returns them
+	 * as a new [CollectionViewOptions] instance.
*/
- queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
+ viewOptions(): CollectionViewOptions
}
- interface DB {
+ interface Collection {
/**
- * Query executes a query that returns rows, typically a SELECT.
- * The args are for any placeholder parameters in the query.
- *
- * Query uses context.Background internally; to specify the context, use
- * QueryContext.
+ * NormalizeOptions updates the current collection options with a
+ * new normalized state based on the collection type.
*/
- query(query: string, ...args: any[]): (Rows)
+ normalizeOptions(): void
}
- interface DB {
+ interface Collection {
/**
- * QueryRowContext executes a query that is expected to return at most one row.
- * QueryRowContext always returns a non-nil value. Errors are deferred until
- * Row's Scan method is called.
- * If the query selects no rows, the *Row's Scan will return ErrNoRows.
- * Otherwise, the *Row's Scan scans the first selected row and discards
- * the rest.
+ * DecodeOptions decodes the current collection options into the
+ * provided "result" (must be a pointer).
*/
- queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
+ decodeOptions(result: any): void
}
- interface DB {
+ interface Collection {
/**
- * QueryRow executes a query that is expected to return at most one row.
- * QueryRow always returns a non-nil value. Errors are deferred until
- * Row's Scan method is called.
- * If the query selects no rows, the *Row's Scan will return ErrNoRows.
- * Otherwise, the *Row's Scan scans the first selected row and discards
- * the rest.
- *
- * QueryRow uses context.Background internally; to specify the context, use
- * QueryRowContext.
+ * SetOptions normalizes and unmarshals the specified options into m.Options.
*/
- queryRow(query: string, ...args: any[]): (Row)
+ setOptions(typedOptions: any): void
}
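A sketch of the typed options helpers (`isAuth()`, `authOptions()`, `setOptions()`); the `$app.dao()` lookup/save calls are assumptions about the surrounding bindings:

```
// Sketch: read and tweak the typed options of an auth collection.
const collection = $app.dao().findCollectionByNameOrId("users"); // assumed lookup

if (collection.isAuth()) {
    const opts = collection.authOptions(); // decoded CollectionAuthOptions copy

    opts.minPasswordLength = 10;
    collection.setOptions(opts);           // normalizes and stores back into collection.options

    $app.dao().saveCollection(collection); // assumed save binding
}
```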
- interface DB {
+ type _subeDKbD = BaseModel
+ interface ExternalAuth extends _subeDKbD {
+ collectionId: string
+ recordId: string
+ provider: string
+ providerId: string
+ }
+ interface ExternalAuth {
+ tableName(): string
+ }
+ type _subkdarp = BaseModel
+ interface Record extends _subkdarp {
+ }
+ interface Record {
/**
- * BeginTx starts a transaction.
- *
- * The provided context is used until the transaction is committed or rolled back.
- * If the context is canceled, the sql package will roll back
- * the transaction. Tx.Commit will return an error if the context provided to
- * BeginTx is canceled.
- *
- * The provided TxOptions is optional and may be nil if defaults should be used.
- * If a non-default isolation level is used that the driver doesn't support,
- * an error will be returned.
+ * TableName returns the table name associated to the current Record model.
*/
- beginTx(ctx: context.Context, opts: TxOptions): (Tx)
+ tableName(): string
}
- interface DB {
+ interface Record {
/**
- * Begin starts a transaction. The default isolation level is dependent on
- * the driver.
- *
- * Begin uses context.Background internally; to specify the context, use
- * BeginTx.
+ * Collection returns the Collection model associated to the current Record model.
*/
- begin(): (Tx)
+ collection(): (Collection)
}
- interface DB {
+ interface Record {
/**
- * Driver returns the database's underlying driver.
+ * OriginalCopy returns a copy of the current record model populated
+ * with its ORIGINAL data state (aka. the initially loaded) and
+ * everything else reset to the defaults.
*/
- driver(): any
+ originalCopy(): (Record)
}
- interface DB {
+ interface Record {
/**
- * Conn returns a single connection by either opening a new connection
- * or returning an existing connection from the connection pool. Conn will
- * block until either a connection is returned or ctx is canceled.
- * Queries run on the same Conn will be run in the same database session.
- *
- * Every Conn must be returned to the database pool after use by
- * calling Conn.Close.
+ * CleanCopy returns a copy of the current record model populated only
+ * with its LATEST data state and everything else reset to the defaults.
*/
- conn(ctx: context.Context): (Conn)
- }
- /**
- * Tx is an in-progress database transaction.
- *
- * A transaction must end with a call to Commit or Rollback.
- *
- * After a call to Commit or Rollback, all operations on the
- * transaction fail with ErrTxDone.
- *
- * The statements prepared for a transaction by calling
- * the transaction's Prepare or Stmt methods are closed
- * by the call to Commit or Rollback.
- */
- interface Tx {
+ cleanCopy(): (Record)
}
- interface Tx {
+ interface Record {
/**
- * Commit commits the transaction.
+ * Expand returns a shallow copy of the current Record model expand data.
*/
- commit(): void
+ expand(): _TygojaDict
}
- interface Tx {
+ interface Record {
/**
- * Rollback aborts the transaction.
+ * SetExpand shallow copies the provided data to the current Record model's expand.
*/
- rollback(): void
+ setExpand(expand: _TygojaDict): void
}
- interface Tx {
+ interface Record {
/**
- * PrepareContext creates a prepared statement for use within a transaction.
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
- *
- * To use an existing prepared statement on this transaction, see Tx.Stmt.
+ * MergeExpand merges recursively the provided expand data into
+ * the current model's expand (if any).
*
- * The provided context will be used for the preparation of the context, not
- * for the execution of the returned statement. The returned statement
- * will run in the transaction context.
+ * Note that if an expanded prop with the same key is a slice (old or new expand)
+ * then both old and new records will be merged into a new slice (aka. a :merge: [b,c] => [a,b,c]).
+	 * Otherwise the "old" expanded record will be replaced with the "new" one (aka. a :merge: aNew => aNew).
*/
- prepareContext(ctx: context.Context, query: string): (Stmt)
+ mergeExpand(expand: _TygojaDict): void
}
- interface Tx {
+ interface Record {
/**
- * Prepare creates a prepared statement for use within a transaction.
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
- *
- * To use an existing prepared statement on this transaction, see Tx.Stmt.
- *
- * Prepare uses context.Background internally; to specify the context, use
- * PrepareContext.
+ * SchemaData returns a shallow copy ONLY of the defined record schema fields data.
*/
- prepare(query: string): (Stmt)
+ schemaData(): _TygojaDict
}
- interface Tx {
+ interface Record {
/**
- * StmtContext returns a transaction-specific prepared statement from
- * an existing statement.
- *
- * Example:
- *
- * ```
- * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
- * ...
- * tx, err := db.Begin()
- * ...
- * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203)
- * ```
- *
- * The provided context is used for the preparation of the statement, not for the
- * execution of the statement.
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
+ * UnknownData returns a shallow copy ONLY of the unknown record fields data,
+	 * aka. fields that are neither among the base/special system fields,
+ * nor defined by the collection schema.
*/
- stmtContext(ctx: context.Context, stmt: Stmt): (Stmt)
+ unknownData(): _TygojaDict
}
- interface Tx {
+ interface Record {
/**
- * Stmt returns a transaction-specific prepared statement from
- * an existing statement.
- *
- * Example:
- *
- * ```
- * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
- * ...
- * tx, err := db.Begin()
- * ...
- * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
- * ```
- *
- * The returned statement operates within the transaction and will be closed
- * when the transaction has been committed or rolled back.
- *
- * Stmt uses context.Background internally; to specify the context, use
- * StmtContext.
+ * IgnoreEmailVisibility toggles the flag to ignore the auth record email visibility check.
*/
- stmt(stmt: Stmt): (Stmt)
+ ignoreEmailVisibility(state: boolean): void
}
- interface Tx {
+ interface Record {
/**
- * ExecContext executes a query that doesn't return rows.
- * For example: an INSERT and UPDATE.
+ * WithUnknownData toggles the export/serialization of unknown data fields
+ * (false by default).
*/
- execContext(ctx: context.Context, query: string, ...args: any[]): Result
+ withUnknownData(state: boolean): void
}
- interface Tx {
+ interface Record {
/**
- * Exec executes a query that doesn't return rows.
- * For example: an INSERT and UPDATE.
+ * Set sets the provided key-value data pair for the current Record model.
*
- * Exec uses context.Background internally; to specify the context, use
- * ExecContext.
+	 * If the record collection has a field with a name matching the provided "key",
+ * the value will be further normalized according to the field rules.
*/
- exec(query: string, ...args: any[]): Result
+ set(key: string, value: any): void
}
- interface Tx {
+ interface Record {
/**
- * QueryContext executes a query that returns rows, typically a SELECT.
+ * Get returns a normalized single record model data value for "key".
*/
- queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows)
+ get(key: string): any
}
- interface Tx {
+ interface Record {
/**
- * Query executes a query that returns rows, typically a SELECT.
- *
- * Query uses context.Background internally; to specify the context, use
- * QueryContext.
+ * GetBool returns the data value for "key" as a bool.
*/
- query(query: string, ...args: any[]): (Rows)
+ getBool(key: string): boolean
}
- interface Tx {
+ interface Record {
/**
- * QueryRowContext executes a query that is expected to return at most one row.
- * QueryRowContext always returns a non-nil value. Errors are deferred until
- * Row's Scan method is called.
- * If the query selects no rows, the *Row's Scan will return ErrNoRows.
- * Otherwise, the *Row's Scan scans the first selected row and discards
- * the rest.
+ * GetString returns the data value for "key" as a string.
*/
- queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row)
+ getString(key: string): string
}
- interface Tx {
+ interface Record {
/**
- * QueryRow executes a query that is expected to return at most one row.
- * QueryRow always returns a non-nil value. Errors are deferred until
- * Row's Scan method is called.
- * If the query selects no rows, the *Row's Scan will return ErrNoRows.
- * Otherwise, the *Row's Scan scans the first selected row and discards
- * the rest.
- *
- * QueryRow uses context.Background internally; to specify the context, use
- * QueryRowContext.
+ * GetInt returns the data value for "key" as an int.
*/
- queryRow(query: string, ...args: any[]): (Row)
+ getInt(key: string): number
}
- /**
- * Stmt is a prepared statement.
- * A Stmt is safe for concurrent use by multiple goroutines.
- *
- * If a Stmt is prepared on a Tx or Conn, it will be bound to a single
- * underlying connection forever. If the Tx or Conn closes, the Stmt will
- * become unusable and all operations will return an error.
- * If a Stmt is prepared on a DB, it will remain usable for the lifetime of the
- * DB. When the Stmt needs to execute on a new underlying connection, it will
- * prepare itself on the new connection automatically.
- */
- interface Stmt {
+ interface Record {
+ /**
+ * GetFloat returns the data value for "key" as a float64.
+ */
+ getFloat(key: string): number
}
- interface Stmt {
+ interface Record {
/**
- * ExecContext executes a prepared statement with the given arguments and
- * returns a Result summarizing the effect of the statement.
+ * GetTime returns the data value for "key" as a [time.Time] instance.
*/
- execContext(ctx: context.Context, ...args: any[]): Result
+ getTime(key: string): time.Time
}
- interface Stmt {
+ interface Record {
/**
- * Exec executes a prepared statement with the given arguments and
- * returns a Result summarizing the effect of the statement.
- *
- * Exec uses context.Background internally; to specify the context, use
- * ExecContext.
+ * GetDateTime returns the data value for "key" as a DateTime instance.
*/
- exec(...args: any[]): Result
+ getDateTime(key: string): types.DateTime
}
- interface Stmt {
+ interface Record {
/**
- * QueryContext executes a prepared query statement with the given arguments
- * and returns the query results as a *Rows.
+ * GetStringSlice returns the data value for "key" as a slice of unique strings.
*/
- queryContext(ctx: context.Context, ...args: any[]): (Rows)
+ getStringSlice(key: string): Array
}
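+ /**
+ * Example (illustrative sketch only, not part of the generated typings):
+ * a record loaded via the `$app.dao()` helper available in pb_hooks scripts
+ * can be read and written through the typed accessors declared above
+ * ("articles" and the field names are hypothetical):
+ *
+ * ```
+ * const record = $app.dao().findRecordById("articles", "RECORD_ID")
+ *
+ * record.set("title", "Lorem ipsum")
+ * record.set("views", "123") // normalized to a number by the field rules
+ *
+ * const title   = record.getString("title")
+ * const views   = record.getInt("views")
+ * const tags    = record.getStringSlice("tags")
+ * const created = record.getDateTime("created")
+ * ```
+ */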
- interface Stmt {
+ interface Record {
/**
- * Query executes a prepared query statement with the given arguments
- * and returns the query results as a *Rows.
+ * ExpandedOne retrieves a single relation Record from the already
+ * loaded expand data of the current model.
+ *
+ * If the requested expand relation is multiple, this method returns
+ * only first available Record from the expanded relation.
*
- * Query uses context.Background internally; to specify the context, use
- * QueryContext.
+ * Returns nil if there is no such expand relation loaded.
*/
- query(...args: any[]): (Rows)
+ expandedOne(relField: string): (Record)
}
- interface Stmt {
+ interface Record {
/**
- * QueryRowContext executes a prepared query statement with the given arguments.
- * If an error occurs during the execution of the statement, that error will
- * be returned by a call to Scan on the returned *Row, which is always non-nil.
- * If the query selects no rows, the *Row's Scan will return ErrNoRows.
- * Otherwise, the *Row's Scan scans the first selected row and discards
- * the rest.
+ * ExpandedAll retrieves a slice of relation Records from the already
+ * loaded expand data of the current model.
+ *
+ * If the requested expand relation is single, this method normalizes
+ * the return result and will wrap the single model as a slice.
+ *
+ * Returns nil slice if there is no such expand relation loaded.
*/
- queryRowContext(ctx: context.Context, ...args: any[]): (Row)
+ expandedAll(relField: string): Array<(Record | undefined)>
}
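+ /**
+ * Example (illustrative sketch only, not part of the generated typings):
+ * reading already loaded expand data of a `record` instance; "author" and
+ * "tags" are hypothetical relation fields whose expand is assumed to have
+ * been loaded upfront:
+ *
+ * ```
+ * const author = record.expandedOne("author")
+ * if (author) {
+ *     console.log(author.getString("name"))
+ * }
+ *
+ * (record.expandedAll("tags") || []).forEach((tag) => {
+ *     console.log(tag.getString("title"))
+ * })
+ * ```
+ */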
- interface Stmt {
+ interface Record {
/**
- * QueryRow executes a prepared query statement with the given arguments.
- * If an error occurs during the execution of the statement, that error will
- * be returned by a call to Scan on the returned *Row, which is always non-nil.
- * If the query selects no rows, the *Row's Scan will return ErrNoRows.
- * Otherwise, the *Row's Scan scans the first selected row and discards
- * the rest.
+ * Retrieves the "key" json field value and unmarshals it into "result".
*
- * Example usage:
+ * Example
*
* ```
- * var name string
- * err := nameByUseridStmt.QueryRow(id).Scan(&name)
+ * result := struct {
+ * FirstName string `json:"first_name"`
+ * }{}
+ * err := m.UnmarshalJSONField("my_field_name", &result)
* ```
- *
- * QueryRow uses context.Background internally; to specify the context, use
- * QueryRowContext.
*/
- queryRow(...args: any[]): (Row)
+ unmarshalJSONField(key: string, result: any): void
}
- interface Stmt {
+ interface Record {
/**
- * Close closes the statement.
+ * BaseFilesPath returns the storage dir path used by the record.
*/
- close(): void
+ baseFilesPath(): string
}
- /**
- * Rows is the result of a query. Its cursor starts before the first row
- * of the result set. Use Next to advance from row to row.
- */
- interface Rows {
+ interface Record {
+ /**
+ * FindFileFieldByFile returns the first file type field for which
+ * any of the record's data contains the provided filename.
+ */
+ findFileFieldByFile(filename: string): (schema.SchemaField)
}
- interface Rows {
+ interface Record {
/**
- * Next prepares the next result row for reading with the Scan method. It
- * returns true on success, or false if there is no next result row or an error
- * happened while preparing it. Err should be consulted to distinguish between
- * the two cases.
- *
- * Every call to Scan, even the first one, must be preceded by a call to Next.
+ * Load bulk loads the provided data into the current Record model.
*/
- next(): boolean
+ load(data: _TygojaDict): void
}
- interface Rows {
+ interface Record {
/**
- * NextResultSet prepares the next result set for reading. It reports whether
- * there is further result sets, or false if there is no further result set
- * or if there is an error advancing to it. The Err method should be consulted
- * to distinguish between the two cases.
- *
- * After calling NextResultSet, the Next method should always be called before
- * scanning. If there are further result sets they may not have rows in the result
- * set.
+ * ColumnValueMap implements [ColumnValueMapper] interface.
*/
- nextResultSet(): boolean
+ columnValueMap(): _TygojaDict
}
- interface Rows {
+ interface Record {
/**
- * Err returns the error, if any, that was encountered during iteration.
- * Err may be called after an explicit or implicit Close.
+ * PublicExport exports only the record fields that are safe to be public.
+ *
+ * For auth records, to force the export of the email field you need to set
+ * `m.IgnoreEmailVisibility(true)`.
*/
- err(): void
+ publicExport(): _TygojaDict
}
- interface Rows {
+ interface Record {
/**
- * Columns returns the column names.
- * Columns returns an error if the rows are closed.
+ * MarshalJSON implements the [json.Marshaler] interface.
+ *
+ * Only the data exported by `PublicExport()` will be serialized.
*/
- columns(): Array
+ marshalJSON(): string|Array
}
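+ /**
+ * Example (illustrative sketch only, not part of the generated typings)
+ * for exporting the public data of a `record` instance:
+ *
+ * ```
+ * record.ignoreEmailVisibility(true) // for auth records - also export the email field
+ *
+ * const publicData = record.publicExport()
+ * // the JSON serialization of the record (e.g. in API responses)
+ * // contains only the data returned by publicExport()
+ * ```
+ */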
- interface Rows {
+ interface Record {
/**
- * ColumnTypes returns column information such as column type, length,
- * and nullable. Some information may not be available from some drivers.
+ * UnmarshalJSON implements the [json.Unmarshaler] interface.
*/
- columnTypes(): Array<(ColumnType | undefined)>
+ unmarshalJSON(data: string|Array): void
}
- interface Rows {
+ interface Record {
/**
- * Scan copies the columns in the current row into the values pointed
- * at by dest. The number of values in dest must be the same as the
- * number of columns in Rows.
+ * ReplaceModifers returns a new map with applied modifier
+ * values based on the current record and the specified data.
*
- * Scan converts columns read from the database into the following
- * common Go types and special types provided by the sql package:
+ * The resolved modifier keys will be removed.
+ *
+ * Multiple modifiers will be applied one after another,
+ * while reusing the previous base key value result (e.g. 1; -5; +2 => -2).
+ *
+ * Example usage:
*
* ```
- * *string
- * *[]byte
- * *int, *int8, *int16, *int32, *int64
- * *uint, *uint8, *uint16, *uint32, *uint64
- * *bool
- * *float32, *float64
- * *interface{}
- * *RawBytes
- * *Rows (cursor value)
- * any type implementing Scanner (see Scanner docs)
+ * newData := record.ReplaceModifers(data)
+ * // record: {"field": 10}
+ * // data: {"field+": 5}
+ * // newData: {"field": 15}
* ```
+ */
+ replaceModifers(data: _TygojaDict): _TygojaDict
+ }
+ interface Record {
+ /**
+ * Username returns the "username" auth record data value.
+ */
+ username(): string
+ }
+ interface Record {
+ /**
+ * SetUsername sets the "username" auth record data value.
*
- * In the most simple case, if the type of the value from the source
- * column is an integer, bool or string type T and dest is of type *T,
- * Scan simply assigns the value through the pointer.
- *
- * Scan also converts between string and numeric types, as long as no
- * information would be lost. While Scan stringifies all numbers
- * scanned from numeric database columns into *string, scans into
- * numeric types are checked for overflow. For example, a float64 with
- * value 300 or a string with value "300" can scan into a uint16, but
- * not into a uint8, though float64(255) or "255" can scan into a
- * uint8. One exception is that scans of some float64 numbers to
- * strings may lose information when stringifying. In general, scan
- * floating point columns into *float64.
- *
- * If a dest argument has type *[]byte, Scan saves in that argument a
- * copy of the corresponding data. The copy is owned by the caller and
- * can be modified and held indefinitely. The copy can be avoided by
- * using an argument of type *RawBytes instead; see the documentation
- * for RawBytes for restrictions on its use.
- *
- * If an argument has type *interface{}, Scan copies the value
- * provided by the underlying driver without conversion. When scanning
- * from a source value of type []byte to *interface{}, a copy of the
- * slice is made and the caller owns the result.
- *
- * Source values of type time.Time may be scanned into values of type
- * *time.Time, *interface{}, *string, or *[]byte. When converting to
- * the latter two, time.RFC3339Nano is used.
- *
- * Source values of type bool may be scanned into types *bool,
- * *interface{}, *string, *[]byte, or *RawBytes.
- *
- * For scanning into *bool, the source may be true, false, 1, 0, or
- * string inputs parseable by strconv.ParseBool.
- *
- * Scan can also convert a cursor returned from a query, such as
- * "select cursor(select * from my_table) from dual", into a
- * *Rows value that can itself be scanned from. The parent
- * select query will close any cursor *Rows if the parent *Rows is closed.
+ * This method doesn't check whether the provided value is a valid username.
*
- * If any of the first arguments implementing Scanner returns an error,
- * that error will be wrapped in the returned error.
+ * Returns an error if the record is not from an auth collection.
*/
- scan(...dest: any[]): void
+ setUsername(username: string): void
}
- interface Rows {
+ interface Record {
/**
- * Close closes the Rows, preventing further enumeration. If Next is called
- * and returns false and there are no further result sets,
- * the Rows are closed automatically and it will suffice to check the
- * result of Err. Close is idempotent and does not affect the result of Err.
+ * Email returns the "email" auth record data value.
*/
- close(): void
+ email(): string
}
- /**
- * A Result summarizes an executed SQL command.
- */
- interface Result {
- [key:string]: any;
+ interface Record {
/**
- * LastInsertId returns the integer generated by the database
- * in response to a command. Typically this will be from an
- * "auto increment" column when inserting a new row. Not all
- * databases support this feature, and the syntax of such
- * statements varies.
+ * SetEmail sets the "email" auth record data value.
+ *
+ * This method doesn't check whether the provided value is a valid email.
+ *
+ * Returns an error if the record is not from an auth collection.
*/
- lastInsertId(): number
+ setEmail(email: string): void
+ }
+ interface Record {
/**
- * RowsAffected returns the number of rows affected by an
- * update, insert, or delete. Not every database or database
- * driver may support this.
+ * Verified returns the "emailVisibility" auth record data value.
*/
- rowsAffected(): number
- }
-}
-
-namespace migrate {
- /**
- * MigrationsList defines a list with migration definitions
- */
- interface MigrationsList {
+ emailVisibility(): boolean
}
- interface MigrationsList {
+ interface Record {
/**
- * Item returns a single migration from the list by its index.
+ * SetEmailVisibility sets the "emailVisibility" auth record data value.
+ *
+ * Returns an error if the record is not from an auth collection.
*/
- item(index: number): (Migration)
+ setEmailVisibility(visible: boolean): void
}
- interface MigrationsList {
+ interface Record {
/**
- * Items returns the internal migrations list slice.
+ * Verified returns the "verified" auth record data value.
*/
- items(): Array<(Migration | undefined)>
+ verified(): boolean
}
- interface MigrationsList {
+ interface Record {
/**
- * Register adds new migration definition to the list.
- *
- * If `optFilename` is not provided, it will try to get the name from its .go file.
+ * SetVerified sets the "verified" auth record data value.
*
- * The list will be sorted automatically based on the migrations file name.
+ * Returns an error if the record is not from an auth collection.
*/
- register(up: (db: dbx.Builder) => void, down: (db: dbx.Builder) => void, ...optFilename: string[]): void
+ setVerified(verified: boolean): void
}
-}
-
-namespace settings {
- // @ts-ignore
- import validation = ozzo_validation
- /**
- * Settings defines common app configuration options.
- */
- interface Settings {
- meta: MetaConfig
- logs: LogsConfig
- smtp: SmtpConfig
- s3: S3Config
- backups: BackupsConfig
- adminAuthToken: TokenConfig
- adminPasswordResetToken: TokenConfig
- adminFileToken: TokenConfig
- recordAuthToken: TokenConfig
- recordPasswordResetToken: TokenConfig
- recordEmailChangeToken: TokenConfig
- recordVerificationToken: TokenConfig
- recordFileToken: TokenConfig
+ interface Record {
/**
- * Deprecated: Will be removed in v0.9+
+ * TokenKey returns the "tokenKey" auth record data value.
*/
- emailAuth: EmailAuthConfig
- googleAuth: AuthProviderConfig
- facebookAuth: AuthProviderConfig
- githubAuth: AuthProviderConfig
- gitlabAuth: AuthProviderConfig
- discordAuth: AuthProviderConfig
- twitterAuth: AuthProviderConfig
- microsoftAuth: AuthProviderConfig
- spotifyAuth: AuthProviderConfig
- kakaoAuth: AuthProviderConfig
- twitchAuth: AuthProviderConfig
- stravaAuth: AuthProviderConfig
- giteeAuth: AuthProviderConfig
- livechatAuth: AuthProviderConfig
- giteaAuth: AuthProviderConfig
- oidcAuth: AuthProviderConfig
- oidc2Auth: AuthProviderConfig
- oidc3Auth: AuthProviderConfig
- appleAuth: AuthProviderConfig
- instagramAuth: AuthProviderConfig
- vkAuth: AuthProviderConfig
- yandexAuth: AuthProviderConfig
- patreonAuth: AuthProviderConfig
- mailcowAuth: AuthProviderConfig
+ tokenKey(): string
}
- interface Settings {
+ interface Record {
/**
- * Validate makes Settings validatable by implementing [validation.Validatable] interface.
+ * SetTokenKey sets the "tokenKey" auth record data value.
+ *
+ * Returns an error if the record is not from an auth collection.
*/
- validate(): void
+ setTokenKey(key: string): void
}
- interface Settings {
+ interface Record {
/**
- * Merge merges `other` settings into the current one.
+ * RefreshTokenKey generates and sets new random auth record "tokenKey".
+ *
+ * Returns an error if the record is not from an auth collection.
*/
- merge(other: Settings): void
+ refreshTokenKey(): void
}
- interface Settings {
+ interface Record {
/**
- * Clone creates a new deep copy of the current settings.
+ * LastResetSentAt returns the "lastResetSentAt" auth record data value.
*/
- clone(): (Settings)
+ lastResetSentAt(): types.DateTime
}
- interface Settings {
+ interface Record {
/**
- * RedactClone creates a new deep copy of the current settings,
- * while replacing the secret values with `******`.
+ * SetLastResetSentAt sets the "lastResetSentAt" auth record data value.
+ *
+ * Returns an error if the record is not from an auth collection.
*/
- redactClone(): (Settings)
+ setLastResetSentAt(dateTime: types.DateTime): void
}
- interface Settings {
+ interface Record {
/**
- * NamedAuthProviderConfigs returns a map with all registered OAuth2
- * provider configurations (indexed by their name identifier).
+ * LastVerificationSentAt returns the "lastVerificationSentAt" auth record data value.
*/
- namedAuthProviderConfigs(): _TygojaDict
+ lastVerificationSentAt(): types.DateTime
}
-}
-
-/**
- * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
- * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
- */
-namespace cobra {
- interface Command {
+ interface Record {
/**
- * GenBashCompletion generates bash completion file and writes to the passed writer.
+ * SetLastVerificationSentAt sets the "lastVerificationSentAt" auth record data value.
+ *
+ * Returns an error if the record is not from an auth collection.
*/
- genBashCompletion(w: io.Writer): void
+ setLastVerificationSentAt(dateTime: types.DateTime): void
}
- interface Command {
+ interface Record {
/**
- * GenBashCompletionFile generates bash completion file.
+ * PasswordHash returns the "passwordHash" auth record data value.
*/
- genBashCompletionFile(filename: string): void
+ passwordHash(): string
}
- interface Command {
+ interface Record {
/**
- * GenBashCompletionFileV2 generates Bash completion version 2.
+ * ValidatePassword validates a plain password against the auth record password.
+ *
+ * Returns false if the password is incorrect or record is not from an auth collection.
*/
- genBashCompletionFileV2(filename: string, includeDesc: boolean): void
+ validatePassword(password: string): boolean
}
- interface Command {
+ interface Record {
/**
- * GenBashCompletionV2 generates Bash completion file version 2
- * and writes it to the passed writer.
+ * SetPassword sets a cryptographically secure string to the auth record "password" field.
+ * This method also resets the "lastResetSentAt" and the "tokenKey" fields.
+ *
+ * Returns an error if the record is not from an auth collection or
+ * an empty password is provided.
*/
- genBashCompletionV2(w: io.Writer, includeDesc: boolean): void
+ setPassword(password: string): void
}
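+ /**
+ * Example (illustrative sketch only, not part of the generated typings) for
+ * the auth record helpers above, assuming the `$app.dao()` helper available
+ * in pb_hooks scripts ("users" and the credential values are hypothetical):
+ *
+ * ```
+ * const user = $app.dao().findAuthRecordByEmail("users", "test@example.com")
+ *
+ * if (!user.validatePassword("old-password")) {
+ *     throw new BadRequestError("Invalid credentials")
+ * }
+ *
+ * user.setPassword("new-password") // also resets "tokenKey" and "lastResetSentAt"
+ * user.setVerified(true)
+ *
+ * $app.dao().saveRecord(user)
+ * ```
+ */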
- // @ts-ignore
- import flag = pflag
/**
- * Command is just that, a command for your application.
- * E.g. 'go run ...' - 'run' is the command. Cobra requires
- * you to define the usage and description as part of your command
- * definition to ensure usability.
+ * RequestInfo defines an HTTP request data struct, usually used
+ * as part of the `@request.*` filter resolver.
*/
- interface Command {
+ interface RequestInfo {
+ context: string
+ query: _TygojaDict
+ data: _TygojaDict
+ headers: _TygojaDict
+ authRecord?: Record
+ admin?: Admin
+ method: string
+ }
+ interface RequestInfo {
/**
- * Use is the one-line usage message.
- * Recommended syntax is as follows:
- * ```
- * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
- * ... indicates that you can specify multiple values for the previous argument.
- * | indicates mutually exclusive information. You can use the argument to the left of the separator or the
- * argument to the right of the separator. You cannot use both arguments in a single use of the command.
- * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
- * optional, they are enclosed in brackets ([ ]).
- * ```
- * Example: add [-F file | -D dir]... [-f format] profile
+ * HasModifierDataKeys loosely checks if the current struct has any modifier Data keys.
*/
- use: string
+ hasModifierDataKeys(): boolean
+ }
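+ /**
+ * Example (illustrative sketch only, not part of the generated typings):
+ * RequestInfo is usually obtained inside a request hook via the
+ * `$apis.requestInfo(c)` helper exposed to pb_hooks scripts
+ * ("posts" is a hypothetical collection):
+ *
+ * ```
+ * onRecordBeforeCreateRequest((e) => {
+ *     const info = $apis.requestInfo(e.httpContext)
+ *
+ *     if (!info.admin && !info.authRecord) {
+ *         throw new UnauthorizedError()
+ *     }
+ * }, "posts")
+ * ```
+ */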
+}
+
+/**
+ * Package echo implements high performance, minimalist Go web framework.
+ *
+ * Example:
+ *
+ * ```
+ * package main
+ *
+ * import (
+ * "github.com/labstack/echo/v5"
+ * "github.com/labstack/echo/v5/middleware"
+ * "log"
+ * "net/http"
+ * )
+ *
+ * // Handler
+ * func hello(c echo.Context) error {
+ * return c.String(http.StatusOK, "Hello, World!")
+ * }
+ *
+ * func main() {
+ * // Echo instance
+ * e := echo.New()
+ *
+ * // Middleware
+ * e.Use(middleware.Logger())
+ * e.Use(middleware.Recover())
+ *
+ * // Routes
+ * e.GET("/", hello)
+ *
+ * // Start server
+ * if err := e.Start(":8080"); err != http.ErrServerClosed {
+ * log.Fatal(err)
+ * }
+ * }
+ * ```
+ *
+ * Learn more at https://echo.labstack.com
+ */
+namespace echo {
+ /**
+ * Context represents the context of the current HTTP request. It holds request and
+ * response objects, path, path parameters, data and registered handler.
+ */
+ interface Context {
+ [key:string]: any;
/**
- * Aliases is an array of aliases that can be used instead of the first word in Use.
+ * Request returns `*http.Request`.
*/
- aliases: Array
+ request(): (http.Request)
/**
- * SuggestFor is an array of command names for which this command will be suggested -
- * similar to aliases but only suggests.
+ * SetRequest sets `*http.Request`.
*/
- suggestFor: Array
+ setRequest(r: http.Request): void
/**
- * Short is the short description shown in the 'help' output.
+ * SetResponse sets `*Response`.
*/
- short: string
+ setResponse(r: Response): void
/**
- * The group id under which this subcommand is grouped in the 'help' output of its parent.
+ * Response returns `*Response`.
*/
- groupID: string
+ response(): (Response)
/**
- * Long is the long message shown in the 'help ' output.
+ * IsTLS returns true if HTTP connection is TLS otherwise false.
*/
- long: string
+ isTLS(): boolean
/**
- * Example is examples of how to use the command.
+ * IsWebSocket returns true if HTTP connection is WebSocket otherwise false.
*/
- example: string
+ isWebSocket(): boolean
/**
- * ValidArgs is list of all valid non-flag arguments that are accepted in shell completions
+ * Scheme returns the HTTP protocol scheme, `http` or `https`.
*/
- validArgs: Array
+ scheme(): string
/**
- * ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
- * It is a dynamic version of using ValidArgs.
- * Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ * RealIP returns the client's network address based on `X-Forwarded-For`
+ * or `X-Real-IP` request header.
+ * The behavior can be configured using `Echo#IPExtractor`.
*/
- validArgsFunction: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective]
+ realIP(): string
/**
- * Expected arguments
+ * RouteInfo returns the current request route information (method, path, name and params, if they exist for the matched route).
+ * In case of 404 (route not found) and 405 (method not allowed) RouteInfo returns a generic struct for these cases.
*/
- args: PositionalArgs
+ routeInfo(): RouteInfo
/**
- * ArgAliases is List of aliases for ValidArgs.
- * These are not suggested to the user in the shell completion,
- * but accepted if entered manually.
+ * Path returns the registered path for the handler.
*/
- argAliases: Array
+ path(): string
/**
- * BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator.
- * For portability with other shells, it is recommended to instead use ValidArgsFunction
+ * PathParam returns path parameter by name.
*/
- bashCompletionFunction: string
+ pathParam(name: string): string
/**
- * Deprecated defines, if this command is deprecated and should print this string when used.
+ * PathParamDefault returns the path parameter or default value for the provided name.
+ *
+ * Notes for the DefaultRouter implementation:
+ * The path parameter could be empty in cases like these:
+ * * route `/release-:version/bin` and request URL is `/release-/bin`
+ * * route `/api/:version/image.jpg` and request URL is `/api//image.jpg`
+ * but not when the path parameter is the last part of the route path:
+ * * route `/download/file.:ext` will not match the request `/download/file.`
*/
- deprecated: string
+ pathParamDefault(name: string, defaultValue: string): string
/**
- * Annotations are key/value pairs that can be used by applications to identify or
- * group commands or set special options.
+ * PathParams returns path parameter values.
*/
- annotations: _TygojaDict
+ pathParams(): PathParams
/**
- * Version defines the version for this command. If this value is non-empty and the command does not
- * define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
- * will print content of the "Version" variable. A shorthand "v" flag will also be added if the
- * command does not define one.
+ * SetPathParams sets path parameters for current request.
*/
- version: string
+ setPathParams(params: PathParams): void
/**
- * The *Run functions are executed in the following order:
- * ```
- * * PersistentPreRun()
- * * PreRun()
- * * Run()
- * * PostRun()
- * * PersistentPostRun()
- * ```
- * All functions get the same args, the arguments after the command name.
- * The *PreRun and *PostRun functions will only be executed if the Run function of the current
- * command has been declared.
- *
- * PersistentPreRun: children of this command will inherit and execute.
+ * QueryParam returns the query param for the provided name.
*/
- persistentPreRun: (cmd: Command, args: Array) => void
+ queryParam(name: string): string
/**
- * PersistentPreRunE: PersistentPreRun but returns an error.
+ * QueryParamDefault returns the query param or default value for the provided name.
*/
- persistentPreRunE: (cmd: Command, args: Array) => void
+ queryParamDefault(name: string, defaultValue: string): string
/**
- * PreRun: children of this command will not inherit.
+ * QueryParams returns the query parameters as `url.Values`.
*/
- preRun: (cmd: Command, args: Array) => void
+ queryParams(): url.Values
/**
- * PreRunE: PreRun but returns an error.
+ * QueryString returns the URL query string.
*/
- preRunE: (cmd: Command, args: Array) => void
+ queryString(): string
/**
- * Run: Typically the actual work function. Most commands will only implement this.
+ * FormValue returns the form field value for the provided name.
*/
- run: (cmd: Command, args: Array) => void
+ formValue(name: string): string
/**
- * RunE: Run but returns an error.
+ * FormValueDefault returns the form field value or default value for the provided name.
*/
- runE: (cmd: Command, args: Array) => void
+ formValueDefault(name: string, defaultValue: string): string
/**
- * PostRun: run after the Run command.
+ * FormValues returns the form field values as `url.Values`.
*/
- postRun: (cmd: Command, args: Array) => void
+ formValues(): url.Values
/**
- * PostRunE: PostRun but returns an error.
+ * FormFile returns the multipart form file for the provided name.
*/
- postRunE: (cmd: Command, args: Array) => void
+ formFile(name: string): (multipart.FileHeader)
/**
- * PersistentPostRun: children of this command will inherit and execute after PostRun.
+ * MultipartForm returns the multipart form.
*/
- persistentPostRun: (cmd: Command, args: Array) => void
+ multipartForm(): (multipart.Form)
/**
- * PersistentPostRunE: PersistentPostRun but returns an error.
+ * Cookie returns the named cookie provided in the request.
*/
- persistentPostRunE: (cmd: Command, args: Array) => void
+ cookie(name: string): (http.Cookie)
/**
- * FParseErrWhitelist flag parse errors to be ignored
+ * SetCookie adds a `Set-Cookie` header in HTTP response.
*/
- fParseErrWhitelist: FParseErrWhitelist
+ setCookie(cookie: http.Cookie): void
/**
- * CompletionOptions is a set of options to control the handling of shell completion
+ * Cookies returns the HTTP cookies sent with the request.
*/
- completionOptions: CompletionOptions
+ cookies(): Array<(http.Cookie | undefined)>
/**
- * TraverseChildren parses flags on all parents before executing child command.
+ * Get retrieves data from the context.
*/
- traverseChildren: boolean
+ get(key: string): {
+ }
/**
- * Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
+ * Set saves data in the context.
*/
- hidden: boolean
+ set(key: string, val: {
+ }): void
/**
- * SilenceErrors is an option to quiet errors down stream.
+ * Bind binds path params, query params and the request body into provided type `i`. The default binder
+ * binds body based on Content-Type header.
*/
- silenceErrors: boolean
+ bind(i: {
+ }): void
/**
- * SilenceUsage is an option to silence usage when an error occurs.
+ * Validate validates provided `i`. It is usually called after `Context#Bind()`.
+ * Validator must be registered using `Echo#Validator`.
*/
- silenceUsage: boolean
+ validate(i: {
+ }): void
/**
- * DisableFlagParsing disables the flag parsing.
- * If this is true all flags will be passed to the command as arguments.
+ * Render renders a template with data and sends a text/html response with status
+ * code. Renderer must be registered using `Echo.Renderer`.
*/
- disableFlagParsing: boolean
+ render(code: number, name: string, data: {
+ }): void
/**
- * DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
- * will be printed by generating docs for this command.
+ * HTML sends an HTTP response with status code.
*/
- disableAutoGenTag: boolean
+ html(code: number, html: string): void
/**
- * DisableFlagsInUseLine will disable the addition of [flags] to the usage
- * line of a command when printing help or generating docs
+ * HTMLBlob sends an HTTP blob response with status code.
*/
- disableFlagsInUseLine: boolean
+ htmlBlob(code: number, b: string|Array): void
/**
- * DisableSuggestions disables the suggestions based on Levenshtein distance
- * that go along with 'unknown command' messages.
+ * String sends a string response with status code.
*/
- disableSuggestions: boolean
+ string(code: number, s: string): void
/**
- * SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
- * Must be > 0.
+ * JSON sends a JSON response with status code.
*/
- suggestionsMinimumDistance: number
- }
- interface Command {
+ json(code: number, i: {
+ }): void
/**
- * Context returns underlying command context. If command was executed
- * with ExecuteContext or the context was set with SetContext, the
- * previously set context will be returned. Otherwise, nil is returned.
- *
- * Notice that a call to Execute and ExecuteC will replace a nil context of
- * a command with a context.Background, so a background context will be
- * returned by Context after one of these functions has been called.
+ * JSONPretty sends a pretty-print JSON with status code.
*/
- context(): context.Context
- }
- interface Command {
+ jsonPretty(code: number, i: {
+ }, indent: string): void
/**
- * SetContext sets context for the command. This context will be overwritten by
- * Command.ExecuteContext or Command.ExecuteContextC.
+ * JSONBlob sends a JSON blob response with status code.
*/
- setContext(ctx: context.Context): void
- }
- interface Command {
+ jsonBlob(code: number, b: string|Array): void
/**
- * SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
- * particularly useful when testing.
+ * JSONP sends a JSONP response with status code. It uses `callback` to construct
+ * the JSONP payload.
*/
- setArgs(a: Array): void
- }
- interface Command {
+ jsonp(code: number, callback: string, i: {
+ }): void
/**
- * SetOutput sets the destination for usage and error messages.
- * If output is nil, os.Stderr is used.
- * Deprecated: Use SetOut and/or SetErr instead
+ * JSONPBlob sends a JSONP blob response with status code. It uses `callback`
+ * to construct the JSONP payload.
*/
- setOutput(output: io.Writer): void
- }
- interface Command {
+ jsonpBlob(code: number, callback: string, b: string|Array): void
/**
- * SetOut sets the destination for usage messages.
- * If newOut is nil, os.Stdout is used.
+ * XML sends an XML response with status code.
*/
- setOut(newOut: io.Writer): void
- }
- interface Command {
+ xml(code: number, i: {
+ }): void
/**
- * SetErr sets the destination for error messages.
- * If newErr is nil, os.Stderr is used.
+ * XMLPretty sends a pretty-print XML with status code.
*/
- setErr(newErr: io.Writer): void
- }
- interface Command {
+ xmlPretty(code: number, i: {
+ }, indent: string): void
/**
- * SetIn sets the source for input data
- * If newIn is nil, os.Stdin is used.
+ * XMLBlob sends an XML blob response with status code.
*/
- setIn(newIn: io.Reader): void
- }
- interface Command {
+ xmlBlob(code: number, b: string|Array): void
/**
- * SetUsageFunc sets usage function. Usage can be defined by application.
+ * Blob sends a blob response with status code and content type.
*/
- setUsageFunc(f: (_arg0: Command) => void): void
- }
- interface Command {
+ blob(code: number, contentType: string, b: string|Array): void
/**
- * SetUsageTemplate sets usage template. Can be defined by Application.
+ * Stream sends a streaming response with status code and content type.
*/
- setUsageTemplate(s: string): void
- }
- interface Command {
+ stream(code: number, contentType: string, r: io.Reader): void
/**
- * SetFlagErrorFunc sets a function to generate an error when flag parsing
- * fails.
+ * File sends a response with the content of the file.
*/
- setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void
- }
- interface Command {
+ file(file: string): void
/**
- * SetHelpFunc sets help function. Can be defined by Application.
+ * FileFS sends a response with the content of the file from given filesystem.
*/
- setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void
- }
- interface Command {
+ fileFS(file: string, filesystem: fs.FS): void
/**
- * SetHelpCommand sets help command.
+ * Attachment sends a response as attachment, prompting client to save the
+ * file.
*/
- setHelpCommand(cmd: Command): void
- }
- interface Command {
+ attachment(file: string, name: string): void
/**
- * SetHelpCommandGroupID sets the group id of the help command.
+ * Inline sends a response as inline, opening the file in the browser.
*/
- setHelpCommandGroupID(groupID: string): void
- }
- interface Command {
+ inline(file: string, name: string): void
/**
- * SetCompletionCommandGroupID sets the group id of the completion command.
+ * NoContent sends a response with no body and a status code.
*/
- setCompletionCommandGroupID(groupID: string): void
- }
- interface Command {
+ noContent(code: number): void
/**
- * SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+ * Redirect redirects the request to a provided URL with status code.
*/
- setHelpTemplate(s: string): void
- }
- interface Command {
+ redirect(code: number, url: string): void
/**
- * SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+ * Error invokes the registered global HTTP error handler. Generally used by middleware.
+ * A side-effect of calling global error handler is that now Response has been committed (sent to the client) and
+ * middlewares up in chain can not change Response status code or Response body anymore.
+ *
+ * Avoid using this method in handlers as no middleware will be able to effectively handle errors after that.
+ * Instead of calling this method in handler return your error and let it be handled by middlewares or global error handler.
*/
- setVersionTemplate(s: string): void
- }
- interface Command {
+ error(err: Error): void
/**
- * SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
+ * Echo returns the `Echo` instance.
+ *
+ * WARNING: Remember that Echo public fields and methods are goroutine safe ONLY when you are NOT mutating them
+ * anywhere in your code after the Echo server has started.
*/
- setErrPrefix(s: string): void
+ echo(): (Echo)
}
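+ /**
+ * Example (illustrative sketch only, not part of the generated typings):
+ * a custom pb_hooks route handler using some of the Context helpers above,
+ * registered via the `routerAdd` helper ("/hello/:name" is a hypothetical path):
+ *
+ * ```
+ * routerAdd("GET", "/hello/:name", (c) => {
+ *     const name  = c.pathParam("name")
+ *     const title = c.queryParamDefault("title", "friend")
+ *
+ *     return c.json(200, { "message": "Hello, " + title + " " + name + "!" })
+ * })
+ * ```
+ */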
- interface Command {
+ // @ts-ignore
+ import stdContext = context
+ /**
+ * Echo is the top-level framework instance.
+ *
+ * Goroutine safety: Do not mutate Echo instance fields after the server has started. Accessing these
+ * fields from handlers/middlewares and changing field values at the same time leads to data races.
+ * The same rule applies to adding new routes after the server has been started; adding a route is not a goroutine-safe action.
+ */
+ interface Echo {
/**
- * SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
- * The user should not have a cyclic dependency on commands.
+ * NewContextFunc allows using custom context implementations, instead of default *echo.context
*/
- setGlobalNormalizationFunc(n: (f: any, name: string) => any): void
- }
- interface Command {
+ newContextFunc: (e: Echo, pathParamAllocSize: number) => ServableContext
+ debug: boolean
+ httpErrorHandler: HTTPErrorHandler
+ binder: Binder
+ jsonSerializer: JSONSerializer
+ validator: Validator
+ renderer: Renderer
+ logger: Logger
+ ipExtractor: IPExtractor
/**
- * OutOrStdout returns output to stdout.
+ * Filesystem is file system used by Static and File handlers to access files.
+ * Defaults to os.DirFS(".")
+ *
+ * When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory")` to create a sub fs which uses the necessary
+ * prefix for the directory path. This is necessary as `//go:embed assets/images` embeds files with paths
+ * including `assets/images` as their prefix.
*/
- outOrStdout(): io.Writer
- }
- interface Command {
+ filesystem: fs.FS
/**
- * OutOrStderr returns output to stderr
+ * OnAddRoute is called when Echo adds a new route to a specific host router. The handler is called for every router
+ * and before the route is added to the host router.
*/
- outOrStderr(): io.Writer
+ onAddRoute: (host: string, route: Routable) => void
}
- interface Command {
+ /**
+ * HandlerFunc defines a function to serve HTTP requests.
+ */
+ interface HandlerFunc {(c: Context): void }
+ /**
+ * MiddlewareFunc defines a function to process middleware.
+ */
+ interface MiddlewareFunc {(next: HandlerFunc): HandlerFunc }
+ interface Echo {
/**
- * ErrOrStderr returns output to stderr
+ * NewContext returns a new Context instance.
+ *
+ * Note: both request and response can be left to nil as Echo.ServeHTTP will call c.Reset(req,resp) anyway
+ * these arguments are useful when creating context for tests and cases like that.
*/
- errOrStderr(): io.Writer
+ newContext(r: http.Request, w: http.ResponseWriter): Context
}
- interface Command {
+ interface Echo {
/**
- * InOrStdin returns input to stdin
+ * Router returns the default router.
*/
- inOrStdin(): io.Reader
+ router(): Router
}
- interface Command {
+ interface Echo {
/**
- * UsageFunc returns either the function set by SetUsageFunc for this command
- * or a parent, or it returns a default usage function.
+ * Routers returns the new map of host => router.
*/
- usageFunc(): (_arg0: Command) => void
+ routers(): _TygojaDict
}
- interface Command {
+ interface Echo {
/**
- * Usage puts out the usage for the command.
- * Used when a user provides invalid input.
- * Can be defined by user by overriding UsageFunc.
+ * RouterFor returns Router for given host. When host is left empty the default router is returned.
*/
- usage(): void
+ routerFor(host: string): [Router, boolean]
}
- interface Command {
+ interface Echo {
/**
- * HelpFunc returns either the function set by SetHelpFunc for this command
- * or a parent, or it returns a function with default help behavior.
+ * ResetRouterCreator resets callback for creating new router instances.
+ * Note: current (default) router is immediately replaced with router created with creator func and vhost routers are cleared.
*/
- helpFunc(): (_arg0: Command, _arg1: Array) => void
+ resetRouterCreator(creator: (e: Echo) => Router): void
}
- interface Command {
+ interface Echo {
/**
- * Help puts out the help for the command.
- * Used when a user calls help [command].
- * Can be defined by user by overriding HelpFunc.
+ * Pre adds middleware to the chain which is run before router tries to find matching route.
+ * Meaning middleware is executed even for 404 (not found) cases.
*/
- help(): void
+ pre(...middleware: MiddlewareFunc[]): void
}
- interface Command {
+ interface Echo {
/**
- * UsageString returns usage string.
+ * Use adds middleware to the chain which is run after router has found matching route and before route/request handler method is executed.
*/
- usageString(): string
+ use(...middleware: MiddlewareFunc[]): void
}
- interface Command {
+ interface Echo {
/**
- * FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
- * command or a parent, or it returns a function which returns the original
- * error.
+ * CONNECT registers a new CONNECT route for a path with matching handler in the
+ * router with optional route-level middleware. Panics on error.
*/
- flagErrorFunc(): (_arg0: Command, _arg1: Error) => void
+ connect(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * UsagePadding return padding for the usage.
+ * DELETE registers a new DELETE route for a path with matching handler in the router
+ * with optional route-level middleware. Panics on error.
*/
- usagePadding(): number
+ delete(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * CommandPathPadding return padding for the command path.
+ * GET registers a new GET route for a path with matching handler in the router
+ * with optional route-level middleware. Panics on error.
*/
- commandPathPadding(): number
+ get(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
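+ /**
+ * Example (illustrative sketch only, not part of the generated typings):
+ * in pb_hooks the route registration methods above are usually reached
+ * through the `onBeforeServe` hook and its `e.router` field, following the
+ * method names declared in these typings ("/api/hello" is a hypothetical path):
+ *
+ * ```
+ * onBeforeServe((e) => {
+ *     e.router.get("/api/hello", (c) => {
+ *         return c.string(200, "Hello!")
+ *     })
+ * })
+ * ```
+ */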
- interface Command {
+ interface Echo {
/**
- * NamePadding returns padding for the name.
+ * HEAD registers a new HEAD route for a path with matching handler in the
+ * router with optional route-level middleware. Panics on error.
*/
- namePadding(): number
+ head(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * UsageTemplate returns usage template for the command.
+ * OPTIONS registers a new OPTIONS route for a path with matching handler in the
+ * router with optional route-level middleware. Panics on error.
*/
- usageTemplate(): string
+ options(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * HelpTemplate return help template for the command.
+ * PATCH registers a new PATCH route for a path with matching handler in the
+ * router with optional route-level middleware. Panics on error.
*/
- helpTemplate(): string
+ patch(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * VersionTemplate return version template for the command.
+ * POST registers a new POST route for a path with matching handler in the
+ * router with optional route-level middleware. Panics on error.
*/
- versionTemplate(): string
+ post(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * ErrPrefix return error message prefix for the command
+ * PUT registers a new PUT route for a path with matching handler in the
+ * router with optional route-level middleware. Panics on error.
*/
- errPrefix(): string
+ put(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * Find the target command given the args and command tree
- * Meant to be run on the highest node. Only searches down.
+ * TRACE registers a new TRACE route for a path with matching handler in the
+ * router with optional route-level middleware. Panics on error.
*/
- find(args: Array): [(Command), Array]
+ trace(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * Traverse the command tree to find the command, and parse args for
- * each parent.
+ * RouteNotFound registers a special-case route which is executed when no other route is found (i.e. HTTP 404 cases)
+ * for the current request URL.
+ * The path supports static and named/any parameters just like any other HTTP method route. Generally the path ends with a
+ * wildcard/match-any character (`/*`, `/download/*` etc).
+ *
+ * Example: `e.RouteNotFound("/*", func(c echo.Context) error { return c.NoContent(http.StatusNotFound) })`
*/
- traverse(args: Array): [(Command), Array]
+ routeNotFound(path: string, h: HandlerFunc, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * SuggestionsFor provides suggestions for the typedName.
+ * Any registers a new route for all HTTP methods (supported by Echo) and path with matching handler
+ * in the router with optional route-level middleware.
+ *
+ * Note: this method only adds a specific set of supported HTTP methods as handlers and is not a true
+ * "catch-any-arbitrary-method" way of matching requests.
*/
- suggestionsFor(typedName: string): Array
+ any(path: string, handler: HandlerFunc, ...middleware: MiddlewareFunc[]): Routes
}
- interface Command {
+ interface Echo {
/**
- * VisitParents visits all parents of the command and invokes fn on each parent.
+ * Match registers a new route for multiple HTTP methods and path with matching
+ * handler in the router with optional route-level middleware. Panics on error.
*/
- visitParents(fn: (_arg0: Command) => void): void
+ match(methods: Array, path: string, handler: HandlerFunc, ...middleware: MiddlewareFunc[]): Routes
}
- interface Command {
+ interface Echo {
/**
- * Root finds root command.
+ * Static registers a new route with path prefix to serve static files from the provided root directory.
*/
- root(): (Command)
+ static(pathPrefix: string, fsRoot: string): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * ArgsLenAtDash will return the length of c.Flags().Args at the moment
- * when a -- was found during args parsing.
+ * StaticFS registers a new route with path prefix to serve static files from the provided file system.
+ *
+ * When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory")` to create a sub fs which uses the necessary
+ * prefix for the directory path. This is necessary as `//go:embed assets/images` embeds files with paths
+ * including `assets/images` as their prefix.
*/
- argsLenAtDash(): number
+ staticFS(pathPrefix: string, filesystem: fs.FS): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * ExecuteContext is the same as Execute(), but sets the ctx on the command.
- * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
- * functions.
+ * FileFS registers a new route with path to serve file from the provided file system.
*/
- executeContext(ctx: context.Context): void
+ fileFS(path: string, file: string, filesystem: fs.FS, ...m: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * Execute uses the args (os.Args[1:] by default)
- * and run through the command tree finding appropriate matches
- * for commands and then corresponding flags.
+ * File registers a new route with path to serve a static file with optional route-level middleware. Panics on error.
*/
- execute(): void
+ file(path: string, file: string, ...middleware: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
- * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
- * functions.
+ * AddRoute registers a new Route with default host Router
*/
- executeContextC(ctx: context.Context): (Command)
+ addRoute(route: Routable): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * ExecuteC executes the command.
+ * Add registers a new route for an HTTP method and path with matching handler
+ * in the router with optional route-level middleware.
*/
- executeC(): (Command)
- }
- interface Command {
- validateArgs(args: Array): void
+ add(method: string, path: string, handler: HandlerFunc, ...middleware: MiddlewareFunc[]): RouteInfo
}
- interface Command {
+ interface Echo {
/**
- * ValidateRequiredFlags validates all required flags are present and returns an error otherwise
+ * Host creates a new router group for the provided host and optional host-level middleware.
*/
- validateRequiredFlags(): void
+ host(name: string, ...m: MiddlewareFunc[]): (Group)
}
- interface Command {
+ interface Echo {
/**
- * InitDefaultHelpFlag adds default help flag to c.
- * It is called automatically by executing the c or by calling help and usage.
- * If c already has help flag, it will do nothing.
+ * Group creates a new router group with prefix and optional group-level middleware.
*/
- initDefaultHelpFlag(): void
+ group(prefix: string, ...m: MiddlewareFunc[]): (Group)
}
- interface Command {
+ interface Echo {
/**
- * InitDefaultVersionFlag adds default version flag to c.
- * It is called automatically by executing the c.
- * If c already has a version flag, it will do nothing.
- * If c.Version is empty, it will do nothing.
+ * AcquireContext returns an empty `Context` instance from the pool.
+ * You must return the context by calling `ReleaseContext()`.
*/
- initDefaultVersionFlag(): void
+ acquireContext(): Context
}
- interface Command {
+ interface Echo {
/**
- * InitDefaultHelpCmd adds default help command to c.
- * It is called automatically by executing the c or by calling help and usage.
- * If c already has help command or c has no subcommands, it will do nothing.
+ * ReleaseContext returns the `Context` instance back to the pool.
+ * You must call it after `AcquireContext()`.
*/
- initDefaultHelpCmd(): void
+ releaseContext(c: Context): void
}
- interface Command {
+ interface Echo {
/**
- * ResetCommands delete parent, subcommand and help command from c.
+ * ServeHTTP implements `http.Handler` interface, which serves HTTP requests.
*/
- resetCommands(): void
+ serveHTTP(w: http.ResponseWriter, r: http.Request): void
}
- interface Command {
+ interface Echo {
/**
- * Commands returns a sorted slice of child commands.
+ * Start starts the HTTP server on the given address with Echo as a handler serving requests. The server can be shut down by
+ * sending an os.Interrupt signal with `ctrl+c`.
+ *
+ * Note: this method is created for use in examples/demos and is deliberately simple without providing configuration
+ * options.
+ *
+ * In need of customization use:
+ *
+ * ```
+ * sc := echo.StartConfig{Address: ":8080"}
+ * if err := sc.Start(e); err != http.ErrServerClosed {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * // or standard library `http.Server`
+ *
+ * ```
+ * s := http.Server{Addr: ":8080", Handler: e}
+ * if err := s.ListenAndServe(); err != http.ErrServerClosed {
+ * log.Fatal(err)
+ * }
+ * ```
*/
- commands(): Array<(Command | undefined)>
+ start(address: string): void
}
+}
+
+/**
+ * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+ * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+ */
+namespace cobra {
interface Command {
/**
- * AddCommand adds one or more commands to this parent command.
+ * GenBashCompletion generates bash completion file and writes to the passed writer.
*/
- addCommand(...cmds: (Command | undefined)[]): void
+ genBashCompletion(w: io.Writer): void
}
interface Command {
/**
- * Groups returns a slice of child command groups.
+ * GenBashCompletionFile generates bash completion file.
*/
- groups(): Array<(Group | undefined)>
+ genBashCompletionFile(filename: string): void
}
interface Command {
/**
- * AllChildCommandsHaveGroup returns if all subcommands are assigned to a group
+ * GenBashCompletionFileV2 generates Bash completion version 2.
*/
- allChildCommandsHaveGroup(): boolean
+ genBashCompletionFileV2(filename: string, includeDesc: boolean): void
}
interface Command {
/**
- * ContainsGroup return if groupID exists in the list of command groups.
+ * GenBashCompletionV2 generates Bash completion file version 2
+ * and writes it to the passed writer.
*/
- containsGroup(groupID: string): boolean
+ genBashCompletionV2(w: io.Writer, includeDesc: boolean): void
}
+ // @ts-ignore
+ import flag = pflag
+ /**
+ * Command is just that, a command for your application.
+ * E.g. 'go run ...' - 'run' is the command. Cobra requires
+ * you to define the usage and description as part of your command
+ * definition to ensure usability.
+ */
interface Command {
/**
- * AddGroup adds one or more command groups to this parent command.
+ * Use is the one-line usage message.
+ * Recommended syntax is as follows:
+ * ```
+ * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
+ * ... indicates that you can specify multiple values for the previous argument.
+ * | indicates mutually exclusive information. You can use the argument to the left of the separator or the
+ * argument to the right of the separator. You cannot use both arguments in a single use of the command.
+ * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
+ * optional, they are enclosed in brackets ([ ]).
+ * ```
+ * Example: add [-F file | -D dir]... [-f format] profile
*/
- addGroup(...groups: (Group | undefined)[]): void
- }
- interface Command {
+ use: string
/**
- * RemoveCommand removes one or more commands from a parent command.
+ * Aliases is an array of aliases that can be used instead of the first word in Use.
*/
- removeCommand(...cmds: (Command | undefined)[]): void
- }
- interface Command {
+ aliases: Array
/**
- * Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+ * SuggestFor is an array of command names for which this command will be suggested -
+ * similar to aliases but only suggests.
*/
- print(...i: {
- }[]): void
- }
- interface Command {
+ suggestFor: Array
/**
- * Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+ * Short is the short description shown in the 'help' output.
*/
- println(...i: {
- }[]): void
- }
- interface Command {
+ short: string
/**
- * Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+ * The group id under which this subcommand is grouped in the 'help' output of its parent.
*/
- printf(format: string, ...i: {
- }[]): void
- }
- interface Command {
+ groupID: string
/**
- * PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+ * Long is the long message shown in the 'help ' output.
*/
- printErr(...i: {
- }[]): void
- }
- interface Command {
+ long: string
/**
- * PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+ * Example is examples of how to use the command.
*/
- printErrln(...i: {
- }[]): void
- }
- interface Command {
+ example: string
/**
- * PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+ * ValidArgs is list of all valid non-flag arguments that are accepted in shell completions
*/
- printErrf(format: string, ...i: {
- }[]): void
- }
- interface Command {
+ validArgs: Array
/**
- * CommandPath returns the full path to this command.
+ * ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
+ * It is a dynamic version of using ValidArgs.
+ * Only one of ValidArgs and ValidArgsFunction can be used for a command.
*/
- commandPath(): string
- }
- interface Command {
+ validArgsFunction: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective]
/**
- * UseLine puts out the full usage for a given command (including parents).
+ * Expected arguments
*/
- useLine(): string
- }
- interface Command {
+ args: PositionalArgs
/**
- * DebugFlags used to determine which flags have been assigned to which commands
- * and which persist.
- * nolint:goconst
+ * ArgAliases is List of aliases for ValidArgs.
+ * These are not suggested to the user in the shell completion,
+ * but accepted if entered manually.
*/
- debugFlags(): void
- }
- interface Command {
+ argAliases: Array
/**
- * Name returns the command's name: the first word in the use line.
+ * BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator.
+ * For portability with other shells, it is recommended to instead use ValidArgsFunction
*/
- name(): string
- }
- interface Command {
+ bashCompletionFunction: string
/**
- * HasAlias determines if a given string is an alias of the command.
+ * Deprecated defines, if this command is deprecated and should print this string when used.
*/
- hasAlias(s: string): boolean
- }
- interface Command {
+ deprecated: string
/**
- * CalledAs returns the command name or alias that was used to invoke
- * this command or an empty string if the command has not been called.
+ * Annotations are key/value pairs that can be used by applications to identify or
+ * group commands or set special options.
*/
- calledAs(): string
- }
- interface Command {
+ annotations: _TygojaDict
/**
- * NameAndAliases returns a list of the command name and all aliases
+ * Version defines the version for this command. If this value is non-empty and the command does not
+ * define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ * will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ * command does not define one.
*/
- nameAndAliases(): string
- }
- interface Command {
+ version: string
/**
- * HasExample determines if the command has example.
+ * The *Run functions are executed in the following order:
+ * ```
+ * * PersistentPreRun()
+ * * PreRun()
+ * * Run()
+ * * PostRun()
+ * * PersistentPostRun()
+ * ```
+ * All functions get the same args, the arguments after the command name.
+ * The *PreRun and *PostRun functions will only be executed if the Run function of the current
+ * command has been declared.
+ *
+ * PersistentPreRun: children of this command will inherit and execute.
*/
- hasExample(): boolean
- }
- interface Command {
+ persistentPreRun: (cmd: Command, args: Array) => void
/**
- * Runnable determines if the command is itself runnable.
+ * PersistentPreRunE: PersistentPreRun but returns an error.
*/
- runnable(): boolean
- }
- interface Command {
+ persistentPreRunE: (cmd: Command, args: Array) => void
/**
- * HasSubCommands determines if the command has children commands.
+ * PreRun: children of this command will not inherit.
*/
- hasSubCommands(): boolean
- }
- interface Command {
+ preRun: (cmd: Command, args: Array) => void
/**
- * IsAvailableCommand determines if a command is available as a non-help command
- * (this includes all non deprecated/hidden commands).
+ * PreRunE: PreRun but returns an error.
*/
- isAvailableCommand(): boolean
- }
- interface Command {
+ preRunE: (cmd: Command, args: Array) => void
/**
- * IsAdditionalHelpTopicCommand determines if a command is an additional
- * help topic command; additional help topic command is determined by the
- * fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
- * are runnable/hidden/deprecated.
- * Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+ * Run: Typically the actual work function. Most commands will only implement this.
*/
- isAdditionalHelpTopicCommand(): boolean
- }
- interface Command {
+ run: (cmd: Command, args: Array) => void
/**
- * HasHelpSubCommands determines if a command has any available 'help' sub commands
- * that need to be shown in the usage/help default template under 'additional help
- * topics'.
+ * RunE: Run but returns an error.
*/
- hasHelpSubCommands(): boolean
- }
- interface Command {
+ runE: (cmd: Command, args: Array) => void
/**
- * HasAvailableSubCommands determines if a command has available sub commands that
- * need to be shown in the usage/help default template under 'available commands'.
+ * PostRun: run after the Run command.
*/
- hasAvailableSubCommands(): boolean
- }
- interface Command {
+ postRun: (cmd: Command, args: Array) => void
/**
- * HasParent determines if the command is a child command.
+ * PostRunE: PostRun but returns an error.
*/
- hasParent(): boolean
- }
- interface Command {
+ postRunE: (cmd: Command, args: Array) => void
/**
- * GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+ * PersistentPostRun: children of this command will inherit and execute after PostRun.
*/
- globalNormalizationFunc(): (f: any, name: string) => any
- }
- interface Command {
+ persistentPostRun: (cmd: Command, args: Array) => void
/**
- * Flags returns the complete FlagSet that applies
- * to this command (local and persistent declared here and by all parents).
+ * PersistentPostRunE: PersistentPostRun but returns an error.
*/
- flags(): (any)
- }
- interface Command {
+ persistentPostRunE: (cmd: Command, args: Array) => void
/**
- * LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+ * FParseErrWhitelist flag parse errors to be ignored
*/
- localNonPersistentFlags(): (any)
- }
- interface Command {
+ fParseErrWhitelist: FParseErrWhitelist
/**
- * LocalFlags returns the local FlagSet specifically set in the current command.
+ * CompletionOptions is a set of options to control the handling of shell completion
*/
- localFlags(): (any)
- }
- interface Command {
+ completionOptions: CompletionOptions
/**
- * InheritedFlags returns all flags which were inherited from parent commands.
+ * TraverseChildren parses flags on all parents before executing child command.
*/
- inheritedFlags(): (any)
- }
- interface Command {
+ traverseChildren: boolean
/**
- * NonInheritedFlags returns all flags which were not inherited from parent commands.
+ * Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
*/
- nonInheritedFlags(): (any)
- }
- interface Command {
+ hidden: boolean
/**
- * PersistentFlags returns the persistent FlagSet specifically set in the current command.
+ * SilenceErrors is an option to quiet errors downstream.
*/
- persistentFlags(): (any)
- }
- interface Command {
+ silenceErrors: boolean
/**
- * ResetFlags deletes all flags from command.
+ * SilenceUsage is an option to silence usage when an error occurs.
*/
- resetFlags(): void
- }
- interface Command {
+ silenceUsage: boolean
/**
- * HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+ * DisableFlagParsing disables the flag parsing.
+ * If this is true all flags will be passed to the command as arguments.
*/
- hasFlags(): boolean
- }
- interface Command {
+ disableFlagParsing: boolean
/**
- * HasPersistentFlags checks if the command contains persistent flags.
+ * DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+ * will be printed when generating docs for this command.
*/
- hasPersistentFlags(): boolean
- }
- interface Command {
+ disableAutoGenTag: boolean
/**
- * HasLocalFlags checks if the command has flags specifically declared locally.
+ * DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ * line of a command when printing help or generating docs.
*/
- hasLocalFlags(): boolean
- }
- interface Command {
+ disableFlagsInUseLine: boolean
/**
- * HasInheritedFlags checks if the command has flags inherited from its parent command.
+ * DisableSuggestions disables the suggestions based on Levenshtein distance
+ * that go along with 'unknown command' messages.
*/
- hasInheritedFlags(): boolean
- }
- interface Command {
+ disableSuggestions: boolean
/**
- * HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
- * structure) which are not hidden or deprecated.
+ * SuggestionsMinimumDistance defines the minimum Levenshtein distance to display suggestions.
+ * Must be > 0.
*/
- hasAvailableFlags(): boolean
+ suggestionsMinimumDistance: number
}
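
The configuration fields above (including the *Run lifecycle hooks whose ordering is noted earlier in this interface) are what a script passes when constructing a command. Below is a minimal, hedged sketch of how this might look from the PocketBase JSVM; it assumes the runtime exposes a `Command` constructor and `$app.rootCmd` as in the upstream JS hook examples, so treat it as illustrative rather than authoritative.

```js
// Illustrative sketch only (not part of the generated declarations):
// registering a custom console command from a pb_hooks script.
// Assumes the JSVM exposes `Command` and `$app.rootCmd`.
$app.rootCmd.addCommand(new Command({
    use:   "hello",
    short: "Prints a greeting",
    run: (cmd, args) => {
        // args are the arguments after the command name
        console.log("Hello", ...args)
    },
}))
```
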
interface Command {
/**
- * HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+ * Context returns underlying command context. If command was executed
+ * with ExecuteContext or the context was set with SetContext, the
+ * previously set context will be returned. Otherwise, nil is returned.
+ *
+ * Notice that a call to Execute and ExecuteC will replace a nil context of
+ * a command with a context.Background, so a background context will be
+ * returned by Context after one of these functions has been called.
*/
- hasAvailablePersistentFlags(): boolean
+ context(): context.Context
}
interface Command {
/**
- * HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
- * or deprecated.
+ * SetContext sets context for the command. This context will be overwritten by
+ * Command.ExecuteContext or Command.ExecuteContextC.
*/
- hasAvailableLocalFlags(): boolean
+ setContext(ctx: context.Context): void
}
interface Command {
/**
- * HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
- * not hidden or deprecated.
+ * SetArgs sets arguments for the command. It is set to os.Args[1:] by default and, if desired,
+ * can be overridden, which is particularly useful when testing.
*/
- hasAvailableInheritedFlags(): boolean
+ setArgs(a: Array): void
}
interface Command {
/**
- * Flag climbs up the command tree looking for matching flag.
+ * SetOutput sets the destination for usage and error messages.
+ * If output is nil, os.Stderr is used.
+ * Deprecated: Use SetOut and/or SetErr instead
*/
- flag(name: string): (any)
+ setOutput(output: io.Writer): void
}
interface Command {
/**
- * ParseFlags parses persistent flag tree and local flags.
+ * SetOut sets the destination for usage messages.
+ * If newOut is nil, os.Stdout is used.
*/
- parseFlags(args: Array): void
+ setOut(newOut: io.Writer): void
}
interface Command {
/**
- * Parent returns a commands parent command.
+ * SetErr sets the destination for error messages.
+ * If newErr is nil, os.Stderr is used.
*/
- parent(): (Command)
+ setErr(newErr: io.Writer): void
}
interface Command {
/**
- * RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+ * SetIn sets the source for input data.
+ * If newIn is nil, os.Stdin is used.
*/
- registerFlagCompletionFunc(flagName: string, f: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective]): void
+ setIn(newIn: io.Reader): void
}
interface Command {
/**
- * GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
+ * SetUsageFunc sets usage function. Usage can be defined by application.
*/
- getFlagCompletionFunc(flagName: string): [(_arg0: Command, _arg1: Array, _arg2: string) => [Array, ShellCompDirective], boolean]
+ setUsageFunc(f: (_arg0: Command) => void): void
}
interface Command {
/**
- * InitDefaultCompletionCmd adds a default 'completion' command to c.
- * This function will do nothing if any of the following is true:
- * 1- the feature has been explicitly disabled by the program,
- * 2- c has no subcommands (to avoid creating one),
- * 3- c already has a 'completion' command provided by the program.
+ * SetUsageTemplate sets usage template. Can be defined by Application.
*/
- initDefaultCompletionCmd(): void
+ setUsageTemplate(s: string): void
}
interface Command {
/**
- * GenFishCompletion generates fish completion file and writes to the passed writer.
+ * SetFlagErrorFunc sets a function to generate an error when flag parsing
+ * fails.
*/
- genFishCompletion(w: io.Writer, includeDesc: boolean): void
+ setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void
}
interface Command {
/**
- * GenFishCompletionFile generates fish completion file.
+ * SetHelpFunc sets help function. Can be defined by Application.
*/
- genFishCompletionFile(filename: string, includeDesc: boolean): void
+ setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void
}
interface Command {
/**
- * MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
- * if the command is invoked with a subset (but not all) of the given flags.
+ * SetHelpCommand sets help command.
*/
- markFlagsRequiredTogether(...flagNames: string[]): void
+ setHelpCommand(cmd: Command): void
}
interface Command {
/**
- * MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
- * if the command is invoked without at least one flag from the given set of flags.
+ * SetHelpCommandGroupID sets the group id of the help command.
*/
- markFlagsOneRequired(...flagNames: string[]): void
+ setHelpCommandGroupID(groupID: string): void
}
interface Command {
/**
- * MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
- * if the command is invoked with more than one flag from the given set of flags.
+ * SetCompletionCommandGroupID sets the group id of the completion command.
*/
- markFlagsMutuallyExclusive(...flagNames: string[]): void
+ setCompletionCommandGroupID(groupID: string): void
}
interface Command {
/**
- * ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
- * first error encountered.
+ * SetHelpTemplate sets help template to be used. Application can use it to set custom template.
*/
- validateFlagGroups(): void
+ setHelpTemplate(s: string): void
}
interface Command {
/**
- * GenPowerShellCompletionFile generates powershell completion file without descriptions.
+ * SetVersionTemplate sets version template to be used. Application can use it to set custom template.
*/
- genPowerShellCompletionFile(filename: string): void
+ setVersionTemplate(s: string): void
}
interface Command {
/**
- * GenPowerShellCompletion generates powershell completion file without descriptions
- * and writes it to the passed writer.
+ * SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
*/
- genPowerShellCompletion(w: io.Writer): void
+ setErrPrefix(s: string): void
}
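
The Set*Template and SetErrPrefix setters above all accept plain strings (Go/cobra template syntax for the templates), so they can be tweaked directly from a script. A hedged sketch under the same assumptions as the earlier example; the template fields follow cobra's default templates:

```js
// Illustrative only: customizing a command's help/usage rendering.
const cmd = new Command({ use: "hello" })

cmd.setErrPrefix("hello error:")                        // prefix used for error messages
cmd.setUsageTemplate("Usage: {{.UseLine}}\n")           // Go template executed over the command
cmd.setHelpTemplate("{{.Short}}\n\n{{.UsageString}}\n") // combines fields/methods of Command
```
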
interface Command {
/**
- * GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+ * SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+ * The user should not have a cyclic dependency on commands.
*/
- genPowerShellCompletionFileWithDesc(filename: string): void
+ setGlobalNormalizationFunc(n: (f: any, name: string) => any): void
}
interface Command {
/**
- * GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
- * and writes it to the passed writer.
+ * OutOrStdout returns output to stdout.
*/
- genPowerShellCompletionWithDesc(w: io.Writer): void
+ outOrStdout(): io.Writer
}
interface Command {
/**
- * MarkFlagRequired instructs the various shell completion implementations to
- * prioritize the named flag when performing completion,
- * and causes your command to report an error if invoked without the flag.
+ * OutOrStderr returns output to stderr
*/
- markFlagRequired(name: string): void
+ outOrStderr(): io.Writer
}
interface Command {
/**
- * MarkPersistentFlagRequired instructs the various shell completion implementations to
- * prioritize the named persistent flag when performing completion,
- * and causes your command to report an error if invoked without the flag.
+ * ErrOrStderr returns output to stderr
*/
- markPersistentFlagRequired(name: string): void
+ errOrStderr(): io.Writer
}
interface Command {
/**
- * MarkFlagFilename instructs the various shell completion implementations to
- * limit completions for the named flag to the specified file extensions.
+ * InOrStdin returns input to stdin
*/
- markFlagFilename(name: string, ...extensions: string[]): void
+ inOrStdin(): io.Reader
}
interface Command {
/**
- * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
- * The bash completion script will call the bash function f for the flag.
- *
- * This will only work for bash completion.
- * It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
- * to register a Go function which will work across all shells.
+ * UsageFunc returns either the function set by SetUsageFunc for this command
+ * or a parent, or it returns a default usage function.
*/
- markFlagCustom(name: string, f: string): void
+ usageFunc(): (_arg0: Command) => void
}
interface Command {
/**
- * MarkPersistentFlagFilename instructs the various shell completion
- * implementations to limit completions for the named persistent flag to the
- * specified file extensions.
+ * Usage puts out the usage for the command.
+ * Used when a user provides invalid input.
+ * Can be defined by user by overriding UsageFunc.
*/
- markPersistentFlagFilename(name: string, ...extensions: string[]): void
+ usage(): void
}
interface Command {
/**
- * MarkFlagDirname instructs the various shell completion implementations to
- * limit completions for the named flag to directory names.
+ * HelpFunc returns either the function set by SetHelpFunc for this command
+ * or a parent, or it returns a function with default help behavior.
*/
- markFlagDirname(name: string): void
+ helpFunc(): (_arg0: Command, _arg1: Array) => void
}
interface Command {
/**
- * MarkPersistentFlagDirname instructs the various shell completion
- * implementations to limit completions for the named persistent flag to
- * directory names.
+ * Help puts out the help for the command.
+ * Used when a user calls help [command].
+ * Can be defined by user by overriding HelpFunc.
*/
- markPersistentFlagDirname(name: string): void
+ help(): void
}
interface Command {
/**
- * GenZshCompletionFile generates zsh completion file including descriptions.
+ * UsageString returns usage string.
*/
- genZshCompletionFile(filename: string): void
+ usageString(): string
}
interface Command {
/**
- * GenZshCompletion generates zsh completion file including descriptions
- * and writes it to the passed writer.
+ * FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+ * command or a parent, or it returns a function which returns the original
+ * error.
*/
- genZshCompletion(w: io.Writer): void
+ flagErrorFunc(): (_arg0: Command, _arg1: Error) => void
}
interface Command {
/**
- * GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
+ * UsagePadding returns padding for the usage.
*/
- genZshCompletionFileNoDesc(filename: string): void
+ usagePadding(): number
}
interface Command {
/**
- * GenZshCompletionNoDesc generates zsh completion file without descriptions
- * and writes it to the passed writer.
+ * CommandPathPadding returns padding for the command path.
*/
- genZshCompletionNoDesc(w: io.Writer): void
+ commandPathPadding(): number
}
interface Command {
/**
- * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
- * not consistent with Bash completion. It has therefore been disabled.
- * Instead, when no other completion is specified, file completion is done by
- * default for every argument. One can disable file completion on a per-argument
- * basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
- * To achieve file extension filtering, one can use ValidArgsFunction and
- * ShellCompDirectiveFilterFileExt.
- *
- * Deprecated
+ * NamePadding returns padding for the name.
*/
- markZshCompPositionalArgumentFile(argPosition: number, ...patterns: string[]): void
+ namePadding(): number
}
interface Command {
/**
- * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
- * been disabled.
- * To achieve the same behavior across all shells, one can use
- * ValidArgs (for the first argument only) or ValidArgsFunction for
- * any argument (can include the first one also).
- *
- * Deprecated
+ * UsageTemplate returns the usage template for the command.
*/
- markZshCompPositionalArgumentWords(argPosition: number, ...words: string[]): void
- }
-}
-
-/**
- * Package schema implements custom Schema and SchemaField datatypes
- * for handling the Collection schema definitions.
- */
-namespace schema {
- // @ts-ignore
- import validation = ozzo_validation
- /**
- * Schema defines a dynamic db schema as a slice of `SchemaField`s.
- */
- interface Schema {
+ usageTemplate(): string
}
- interface Schema {
+ interface Command {
/**
- * Fields returns the registered schema fields.
+ * HelpTemplate returns the help template for the command.
*/
- fields(): Array<(SchemaField | undefined)>
+ helpTemplate(): string
}
- interface Schema {
+ interface Command {
/**
- * InitFieldsOptions calls `InitOptions()` for all schema fields.
+ * VersionTemplate returns the version template for the command.
*/
- initFieldsOptions(): void
+ versionTemplate(): string
}
- interface Schema {
+ interface Command {
/**
- * Clone creates a deep clone of the current schema.
+ * ErrPrefix returns the error message prefix for the command.
*/
- clone(): (Schema)
+ errPrefix(): string
}
- interface Schema {
+ interface Command {
/**
- * AsMap returns a map with all registered schema field.
- * The returned map is indexed with each field name.
+ * Find the target command given the args and command tree.
+ * Meant to be run on the highest node. Only searches down.
*/
- asMap(): _TygojaDict
+ find(args: Array): [(Command), Array]
}
- interface Schema {
+ interface Command {
/**
- * GetFieldById returns a single field by its id.
+ * Traverse the command tree to find the command, and parse args for
+ * each parent.
*/
- getFieldById(id: string): (SchemaField)
+ traverse(args: Array): [(Command), Array]
}
- interface Schema {
+ interface Command {
/**
- * GetFieldByName returns a single field by its name.
+ * SuggestionsFor provides suggestions for the typedName.
*/
- getFieldByName(name: string): (SchemaField)
+ suggestionsFor(typedName: string): Array
}
- interface Schema {
+ interface Command {
/**
- * RemoveField removes a single schema field by its id.
- *
- * This method does nothing if field with `id` doesn't exist.
+ * VisitParents visits all parents of the command and invokes fn on each parent.
*/
- removeField(id: string): void
+ visitParents(fn: (_arg0: Command) => void): void
}
- interface Schema {
+ interface Command {
/**
- * AddField registers the provided newField to the current schema.
- *
- * If field with `newField.Id` already exist, the existing field is
- * replaced with the new one.
- *
- * Otherwise the new field is appended to the other schema fields.
+ * Root finds root command.
*/
- addField(newField: SchemaField): void
+ root(): (Command)
}
- interface Schema {
+ interface Command {
/**
- * Validate makes Schema validatable by implementing [validation.Validatable] interface.
- *
- * Internally calls each individual field's validator and additionally
- * checks for invalid renamed fields and field name duplications.
+ * ArgsLenAtDash will return the length of c.Flags().Args at the moment
+ * when a -- was found during args parsing.
*/
- validate(): void
+ argsLenAtDash(): number
}
- interface Schema {
+ interface Command {
/**
- * MarshalJSON implements the [json.Marshaler] interface.
+ * ExecuteContext is the same as Execute(), but sets the ctx on the command.
+ * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+ * functions.
*/
- marshalJSON(): string|Array
+ executeContext(ctx: context.Context): void
}
- interface Schema {
+ interface Command {
/**
- * UnmarshalJSON implements the [json.Unmarshaler] interface.
- *
- * On success, all schema field options are auto initialized.
+ * Execute uses the args (os.Args[1:] by default)
+ * and runs through the command tree, finding appropriate matches
+ * for commands and then corresponding flags.
*/
- unmarshalJSON(data: string|Array): void
+ execute(): void
}
- interface Schema {
+ interface Command {
/**
- * Value implements the [driver.Valuer] interface.
+ * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+ * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+ * functions.
*/
- value(): any
+ executeContextC(ctx: context.Context): (Command)
}
- interface Schema {
+ interface Command {
/**
- * Scan implements [sql.Scanner] interface to scan the provided value
- * into the current Schema instance.
+ * ExecuteC executes the command.
*/
- scan(value: any): void
+ executeC(): (Command)
}
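
Find, SetArgs and the Execute* variants are mainly useful when driving a command tree manually (normally the PocketBase binary executes the root command for you). A hedged sketch, reusing the assumed `Command` constructor from the previous examples:

```js
// Illustrative only: resolving and executing a subcommand by hand.
const root = new Command({ use: "tool" })
root.addCommand(new Command({
    use: "greet",
    run: () => console.log("hi"),
}))

// find() returns the matched command plus the remaining args.
const [target, rest] = root.find(["greet"])
console.log(target.name(), rest)

root.setArgs(["greet"]) // defaults to os.Args[1:] when not set
root.execute()          // walks the tree and runs the matched command
```
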
-}
-
-/**
- * Package models implements all PocketBase DB models and DTOs.
- */
-namespace models {
- type _subGeaoE = BaseModel
- interface Admin extends _subGeaoE {
- avatar: number
- email: string
- tokenKey: string
- passwordHash: string
- lastResetSentAt: types.DateTime
+ interface Command {
+ validateArgs(args: Array): void
}
- interface Admin {
+ interface Command {
/**
- * TableName returns the Admin model SQL table name.
+ * ValidateRequiredFlags validates that all required flags are present and returns an error otherwise.
*/
- tableName(): string
+ validateRequiredFlags(): void
}
- interface Admin {
+ interface Command {
/**
- * ValidatePassword validates a plain password against the model's password.
+ * InitDefaultHelpFlag adds default help flag to c.
+ * It is called automatically by executing the c or by calling help and usage.
+ * If c already has help flag, it will do nothing.
*/
- validatePassword(password: string): boolean
+ initDefaultHelpFlag(): void
}
- interface Admin {
+ interface Command {
/**
- * SetPassword sets cryptographically secure string to `model.Password`.
- *
- * Additionally this method also resets the LastResetSentAt and the TokenKey fields.
+ * InitDefaultVersionFlag adds default version flag to c.
+ * It is called automatically by executing the c.
+ * If c already has a version flag, it will do nothing.
+ * If c.Version is empty, it will do nothing.
*/
- setPassword(password: string): void
+ initDefaultVersionFlag(): void
}
- interface Admin {
+ interface Command {
/**
- * RefreshTokenKey generates and sets new random token key.
+ * InitDefaultHelpCmd adds default help command to c.
+ * It is called automatically by executing the c or by calling help and usage.
+ * If c already has help command or c has no subcommands, it will do nothing.
*/
- refreshTokenKey(): void
+ initDefaultHelpCmd(): void
}
- // @ts-ignore
- import validation = ozzo_validation
- type _subvYPhz = BaseModel
- interface Collection extends _subvYPhz {
- name: string
- type: string
- system: boolean
- schema: schema.Schema
- indexes: types.JsonArray
+ interface Command {
/**
- * rules
+ * ResetCommands delete parent, subcommand and help command from c.
*/
- listRule?: string
- viewRule?: string
- createRule?: string
- updateRule?: string
- deleteRule?: string
- options: types.JsonMap
+ resetCommands(): void
}
- interface Collection {
+ interface Command {
/**
- * TableName returns the Collection model SQL table name.
+ * Commands returns a sorted slice of child commands.
*/
- tableName(): string
+ commands(): Array<(Command | undefined)>
}
- interface Collection {
+ interface Command {
/**
- * BaseFilesPath returns the storage dir path used by the collection.
+ * AddCommand adds one or more commands to this parent command.
*/
- baseFilesPath(): string
+ addCommand(...cmds: (Command | undefined)[]): void
}
- interface Collection {
+ interface Command {
/**
- * IsBase checks if the current collection has "base" type.
+ * Groups returns a slice of child command groups.
*/
- isBase(): boolean
+ groups(): Array<(Group | undefined)>
}
- interface Collection {
+ interface Command {
/**
- * IsAuth checks if the current collection has "auth" type.
+ * AllChildCommandsHaveGroup returns whether all subcommands are assigned to a group.
*/
- isAuth(): boolean
+ allChildCommandsHaveGroup(): boolean
}
- interface Collection {
+ interface Command {
/**
- * IsView checks if the current collection has "view" type.
+ * ContainsGroup returns whether groupID exists in the list of command groups.
*/
- isView(): boolean
+ containsGroup(groupID: string): boolean
}
- interface Collection {
+ interface Command {
/**
- * MarshalJSON implements the [json.Marshaler] interface.
+ * AddGroup adds one or more command groups to this parent command.
*/
- marshalJSON(): string|Array
+ addGroup(...groups: (Group | undefined)[]): void
}
- interface Collection {
+ interface Command {
/**
- * BaseOptions decodes the current collection options and returns them
- * as new [CollectionBaseOptions] instance.
+ * RemoveCommand removes one or more commands from a parent command.
*/
- baseOptions(): CollectionBaseOptions
+ removeCommand(...cmds: (Command | undefined)[]): void
}
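
Commands added via addCommand can later be listed or detached again with commands() and removeCommand(). A small hedged sketch under the same assumptions:

```js
// Illustrative only: inspecting and removing child commands.
const parent  = new Command({ use: "tool" })
const version = new Command({ use: "version", run: () => console.log("v1.0.0") })

parent.addCommand(version)
for (const c of parent.commands()) {
    console.log(c.name()) // sorted child command names
}
parent.removeCommand(version)
```
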
- interface Collection {
+ interface Command {
/**
- * AuthOptions decodes the current collection options and returns them
- * as new [CollectionAuthOptions] instance.
+ * Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
*/
- authOptions(): CollectionAuthOptions
+ print(...i: {
+ }[]): void
}
- interface Collection {
+ interface Command {
/**
- * ViewOptions decodes the current collection options and returns them
- * as new [CollectionViewOptions] instance.
+ * Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
*/
- viewOptions(): CollectionViewOptions
+ println(...i: {
+ }[]): void
}
- interface Collection {
+ interface Command {
/**
- * NormalizeOptions updates the current collection options with a
- * new normalized state based on the collection type.
+ * Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
*/
- normalizeOptions(): void
+ printf(format: string, ...i: {
+ }[]): void
}
- interface Collection {
+ interface Command {
/**
- * DecodeOptions decodes the current collection options into the
- * provided "result" (must be a pointer).
+ * PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
*/
- decodeOptions(result: any): void
+ printErr(...i: {
+ }[]): void
}
- interface Collection {
+ interface Command {
/**
- * SetOptions normalizes and unmarshals the specified options into m.Options.
+ * PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
*/
- setOptions(typedOptions: any): void
- }
- type _subZrUNb = BaseModel
- interface ExternalAuth extends _subZrUNb {
- collectionId: string
- recordId: string
- provider: string
- providerId: string
- }
- interface ExternalAuth {
- tableName(): string
- }
- type _subdDjDh = BaseModel
- interface Record extends _subdDjDh {
+ printErrln(...i: {
+ }[]): void
}
- interface Record {
+ interface Command {
/**
- * TableName returns the table name associated to the current Record model.
+ * PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
*/
- tableName(): string
+ printErrf(format: string, ...i: {
+ }[]): void
}
- interface Record {
+ interface Command {
/**
- * Collection returns the Collection model associated to the current Record model.
+ * CommandPath returns the full path to this command.
*/
- collection(): (Collection)
+ commandPath(): string
}
- interface Record {
+ interface Command {
/**
- * OriginalCopy returns a copy of the current record model populated
- * with its ORIGINAL data state (aka. the initially loaded) and
- * everything else reset to the defaults.
+ * UseLine puts out the full usage for a given command (including parents).
*/
- originalCopy(): (Record)
+ useLine(): string
}
- interface Record {
+ interface Command {
/**
- * CleanCopy returns a copy of the current record model populated only
- * with its LATEST data state and everything else reset to the defaults.
+ * DebugFlags used to determine which flags have been assigned to which commands
+ * and which persist.
+ * nolint:goconst
*/
- cleanCopy(): (Record)
+ debugFlags(): void
}
- interface Record {
+ interface Command {
/**
- * Expand returns a shallow copy of the current Record model expand data.
+ * Name returns the command's name: the first word in the use line.
*/
- expand(): _TygojaDict
+ name(): string
}
- interface Record {
+ interface Command {
/**
- * SetExpand shallow copies the provided data to the current Record model's expand.
+ * HasAlias determines if a given string is an alias of the command.
*/
- setExpand(expand: _TygojaDict): void
+ hasAlias(s: string): boolean
}
- interface Record {
+ interface Command {
/**
- * MergeExpand merges recursively the provided expand data into
- * the current model's expand (if any).
- *
- * Note that if an expanded prop with the same key is a slice (old or new expand)
- * then both old and new records will be merged into a new slice (aka. a :merge: [b,c] => [a,b,c]).
- * Otherwise the "old" expanded record will be replace with the "new" one (aka. a :merge: aNew => aNew).
+ * CalledAs returns the command name or alias that was used to invoke
+ * this command or an empty string if the command has not been called.
*/
- mergeExpand(expand: _TygojaDict): void
+ calledAs(): string
}
- interface Record {
+ interface Command {
/**
- * SchemaData returns a shallow copy ONLY of the defined record schema fields data.
+ * NameAndAliases returns a list of the command name and all aliases
*/
- schemaData(): _TygojaDict
+ nameAndAliases(): string
}
- interface Record {
+ interface Command {
/**
- * UnknownData returns a shallow copy ONLY of the unknown record fields data,
- * aka. fields that are neither one of the base and special system ones,
- * nor defined by the collection schema.
+ * HasExample determines if the command has example.
*/
- unknownData(): _TygojaDict
+ hasExample(): boolean
}
- interface Record {
+ interface Command {
/**
- * IgnoreEmailVisibility toggles the flag to ignore the auth record email visibility check.
+ * Runnable determines if the command is itself runnable.
*/
- ignoreEmailVisibility(state: boolean): void
+ runnable(): boolean
}
- interface Record {
+ interface Command {
/**
- * WithUnknownData toggles the export/serialization of unknown data fields
- * (false by default).
+ * HasSubCommands determines if the command has children commands.
*/
- withUnknownData(state: boolean): void
+ hasSubCommands(): boolean
}
- interface Record {
+ interface Command {
/**
- * Set sets the provided key-value data pair for the current Record model.
- *
- * If the record collection has field with name matching the provided "key",
- * the value will be further normalized according to the field rules.
+ * IsAvailableCommand determines if a command is available as a non-help command
+ * (this includes all non deprecated/hidden commands).
*/
- set(key: string, value: any): void
+ isAvailableCommand(): boolean
}
- interface Record {
+ interface Command {
/**
- * Get returns a normalized single record model data value for "key".
+ * IsAdditionalHelpTopicCommand determines if a command is an additional
+ * help topic command; additional help topic command is determined by the
+ * fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+ * are runnable/hidden/deprecated.
+ * Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
*/
- get(key: string): any
+ isAdditionalHelpTopicCommand(): boolean
}
- interface Record {
+ interface Command {
/**
- * GetBool returns the data value for "key" as a bool.
+ * HasHelpSubCommands determines if a command has any available 'help' sub commands
+ * that need to be shown in the usage/help default template under 'additional help
+ * topics'.
*/
- getBool(key: string): boolean
+ hasHelpSubCommands(): boolean
}
- interface Record {
+ interface Command {
/**
- * GetString returns the data value for "key" as a string.
+ * HasAvailableSubCommands determines if a command has available sub commands that
+ * need to be shown in the usage/help default template under 'available commands'.
*/
- getString(key: string): string
+ hasAvailableSubCommands(): boolean
}
- interface Record {
+ interface Command {
/**
- * GetInt returns the data value for "key" as an int.
+ * HasParent determines if the command is a child command.
*/
- getInt(key: string): number
+ hasParent(): boolean
}
- interface Record {
+ interface Command {
/**
- * GetFloat returns the data value for "key" as a float64.
+ * GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
*/
- getFloat(key: string): number
+ globalNormalizationFunc(): (f: any, name: string) => any
}
- interface Record {
+ interface Command {
/**
- * GetTime returns the data value for "key" as a [time.Time] instance.
+ * Flags returns the complete FlagSet that applies
+ * to this command (local and persistent declared here and by all parents).
*/
- getTime(key: string): time.Time
+ flags(): (any)
}
- interface Record {
+ interface Command {
/**
- * GetDateTime returns the data value for "key" as a DateTime instance.
+ * LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
*/
- getDateTime(key: string): types.DateTime
+ localNonPersistentFlags(): (any)
}
- interface Record {
+ interface Command {
/**
- * GetStringSlice returns the data value for "key" as a slice of unique strings.
+ * LocalFlags returns the local FlagSet specifically set in the current command.
*/
- getStringSlice(key: string): Array
+ localFlags(): (any)
}
- interface Record {
+ interface Command {
/**
- * ExpandedOne retrieves a single relation Record from the already
- * loaded expand data of the current model.
- *
- * If the requested expand relation is multiple, this method returns
- * only first available Record from the expanded relation.
- *
- * Returns nil if there is no such expand relation loaded.
+ * InheritedFlags returns all flags which were inherited from parent commands.
*/
- expandedOne(relField: string): (Record)
+ inheritedFlags(): (any)
}
- interface Record {
+ interface Command {
/**
- * ExpandedAll retrieves a slice of relation Records from the already
- * loaded expand data of the current model.
- *
- * If the requested expand relation is single, this method normalizes
- * the return result and will wrap the single model as a slice.
- *
- * Returns nil slice if there is no such expand relation loaded.
+ * NonInheritedFlags returns all flags which were not inherited from parent commands.
*/
- expandedAll(relField: string): Array<(Record | undefined)>
+ nonInheritedFlags(): (any)
}
- interface Record {
+ interface Command {
/**
- * Retrieves the "key" json field value and unmarshals it into "result".
- *
- * Example
- *
- * ```
- * result := struct {
- * FirstName string `json:"first_name"`
- * }{}
- * err := m.UnmarshalJSONField("my_field_name", &result)
- * ```
+ * PersistentFlags returns the persistent FlagSet specifically set in the current command.
*/
- unmarshalJSONField(key: string, result: any): void
+ persistentFlags(): (any)
}
- interface Record {
+ interface Command {
/**
- * BaseFilesPath returns the storage dir path used by the record.
+ * ResetFlags deletes all flags from command.
*/
- baseFilesPath(): string
+ resetFlags(): void
}
- interface Record {
+ interface Command {
/**
- * FindFileFieldByFile returns the first file type field for which
- * any of the record's data contains the provided filename.
+ * HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
*/
- findFileFieldByFile(filename: string): (schema.SchemaField)
+ hasFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * Load bulk loads the provided data into the current Record model.
+ * HasPersistentFlags checks if the command contains persistent flags.
*/
- load(data: _TygojaDict): void
+ hasPersistentFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * ColumnValueMap implements [ColumnValueMapper] interface.
+ * HasLocalFlags checks if the command has flags specifically declared locally.
*/
- columnValueMap(): _TygojaDict
+ hasLocalFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * PublicExport exports only the record fields that are safe to be public.
- *
- * For auth records, to force the export of the email field you need to set
- * `m.IgnoreEmailVisibility(true)`.
+ * HasInheritedFlags checks if the command has flags inherited from its parent command.
*/
- publicExport(): _TygojaDict
+ hasInheritedFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * MarshalJSON implements the [json.Marshaler] interface.
- *
- * Only the data exported by `PublicExport()` will be serialized.
+ * HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+ * structure) which are not hidden or deprecated.
*/
- marshalJSON(): string|Array
+ hasAvailableFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * UnmarshalJSON implements the [json.Unmarshaler] interface.
+ * HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
*/
- unmarshalJSON(data: string|Array): void
+ hasAvailablePersistentFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * ReplaceModifers returns a new map with applied modifier
- * values based on the current record and the specified data.
- *
- * The resolved modifier keys will be removed.
- *
- * Multiple modifiers will be applied one after another,
- * while reusing the previous base key value result (eg. 1; -5; +2 => -2).
- *
- * Example usage:
- *
- * ```
- * newData := record.ReplaceModifers(data)
- * // record: {"field": 10}
- * // data: {"field+": 5}
- * // newData: {"field": 15}
- * ```
+ * HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+ * or deprecated.
*/
- replaceModifers(data: _TygojaDict): _TygojaDict
+ hasAvailableLocalFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * Username returns the "username" auth record data value.
+ * HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+ * not hidden or deprecated.
*/
- username(): string
+ hasAvailableInheritedFlags(): boolean
}
- interface Record {
+ interface Command {
/**
- * SetUsername sets the "username" auth record data value.
- *
- * This method doesn't check whether the provided value is a valid username.
- *
- * Returns an error if the record is not from an auth collection.
+ * Flag climbs up the command tree looking for matching flag.
*/
- setUsername(username: string): void
+ flag(name: string): (any)
}
- interface Record {
+ interface Command {
/**
- * Email returns the "email" auth record data value.
+ * ParseFlags parses persistent flag tree and local flags.
*/
- email(): string
+ parseFlags(args: Array): void
}
- interface Record {
+ interface Command {
/**
- * SetEmail sets the "email" auth record data value.
- *
- * This method doesn't check whether the provided value is a valid email.
- *
- * Returns an error if the record is not from an auth collection.
+ * Parent returns a commands parent command.
*/
- setEmail(email: string): void
+ parent(): (Command)
}
- interface Record {
+ interface Command {
/**
- * Verified returns the "emailVisibility" auth record data value.
+ * RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
*/
- emailVisibility(): boolean
+ registerFlagCompletionFunc(flagName: string, f: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective]): void
}
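
RegisterFlagCompletionFunc expects a callback that returns a [suggestions, directive] tuple. A hedged sketch: the `--format` flag is assumed to be defined elsewhere, and the literal 0 stands in for cobra's ShellCompDirectiveDefault.

```js
// Illustrative only: dynamic completion values for an assumed --format flag.
const cmd = new Command({ use: "export" })

cmd.registerFlagCompletionFunc("format", (cmd, args, toComplete) => {
    const options = ["json", "yaml", "csv"].filter((o) => o.startsWith(toComplete))
    return [options, 0] // 0 == ShellCompDirectiveDefault
})
```
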
- interface Record {
+ interface Command {
/**
- * SetEmailVisibility sets the "emailVisibility" auth record data value.
- *
- * Returns an error if the record is not from an auth collection.
+ * GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
*/
- setEmailVisibility(visible: boolean): void
+ getFlagCompletionFunc(flagName: string): [(_arg0: Command, _arg1: Array, _arg2: string) => [Array, ShellCompDirective], boolean]
}
- interface Record {
+ interface Command {
/**
- * Verified returns the "verified" auth record data value.
+ * InitDefaultCompletionCmd adds a default 'completion' command to c.
+ * This function will do nothing if any of the following is true:
+ * 1- the feature has been explicitly disabled by the program,
+ * 2- c has no subcommands (to avoid creating one),
+ * 3- c already has a 'completion' command provided by the program.
*/
- verified(): boolean
+ initDefaultCompletionCmd(): void
}
- interface Record {
+ interface Command {
/**
- * SetVerified sets the "verified" auth record data value.
- *
- * Returns an error if the record is not from an auth collection.
+ * GenFishCompletion generates fish completion file and writes to the passed writer.
*/
- setVerified(verified: boolean): void
+ genFishCompletion(w: io.Writer, includeDesc: boolean): void
}
- interface Record {
+ interface Command {
/**
- * TokenKey returns the "tokenKey" auth record data value.
+ * GenFishCompletionFile generates fish completion file.
*/
- tokenKey(): string
+ genFishCompletionFile(filename: string, includeDesc: boolean): void
}
- interface Record {
+ interface Command {
/**
- * SetTokenKey sets the "tokenKey" auth record data value.
- *
- * Returns an error if the record is not from an auth collection.
+ * MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+ * if the command is invoked with a subset (but not all) of the given flags.
*/
- setTokenKey(key: string): void
+ markFlagsRequiredTogether(...flagNames: string[]): void
}
- interface Record {
+ interface Command {
/**
- * RefreshTokenKey generates and sets new random auth record "tokenKey".
- *
- * Returns an error if the record is not from an auth collection.
+ * MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
+ * if the command is invoked without at least one flag from the given set of flags.
*/
- refreshTokenKey(): void
+ markFlagsOneRequired(...flagNames: string[]): void
}
- interface Record {
+ interface Command {
/**
- * LastResetSentAt returns the "lastResentSentAt" auth record data value.
+ * MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+ * if the command is invoked with more than one flag from the given set of flags.
*/
- lastResetSentAt(): types.DateTime
+ markFlagsMutuallyExclusive(...flagNames: string[]): void
}
- interface Record {
+ interface Command {
/**
- * SetLastResetSentAt sets the "lastResentSentAt" auth record data value.
- *
- * Returns an error if the record is not from an auth collection.
+ * ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
+ * first error encountered.
*/
- setLastResetSentAt(dateTime: types.DateTime): void
+ validateFlagGroups(): void
}
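
The MarkFlags* group markers above only attach annotations; cobra enforces the constraints at execution time when ValidateFlagGroups runs. A hedged sketch, assuming --username, --password and --token flags were defined elsewhere on the command:

```js
// Illustrative only: declaring relationships between assumed flags.
const cmd = new Command({ use: "login" }) // flags assumed to be defined on it elsewhere

cmd.markFlagsRequiredTogether("username", "password") // must be given together
cmd.markFlagsOneRequired("password", "token")         // at least one is required
cmd.markFlagsMutuallyExclusive("password", "token")   // but not both at once
```
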
- interface Record {
+ interface Command {
/**
- * LastVerificationSentAt returns the "lastVerificationSentAt" auth record data value.
+ * GenPowerShellCompletionFile generates powershell completion file without descriptions.
*/
- lastVerificationSentAt(): types.DateTime
+ genPowerShellCompletionFile(filename: string): void
}
- interface Record {
+ interface Command {
/**
- * SetLastVerificationSentAt sets an "lastVerificationSentAt" auth record data value.
- *
- * Returns an error if the record is not from an auth collection.
+ * GenPowerShellCompletion generates powershell completion file without descriptions
+ * and writes it to the passed writer.
*/
- setLastVerificationSentAt(dateTime: types.DateTime): void
+ genPowerShellCompletion(w: io.Writer): void
}
- interface Record {
+ interface Command {
/**
- * PasswordHash returns the "passwordHash" auth record data value.
+ * GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
*/
- passwordHash(): string
+ genPowerShellCompletionFileWithDesc(filename: string): void
}
- interface Record {
+ interface Command {
/**
- * ValidatePassword validates a plain password against the auth record password.
- *
- * Returns false if the password is incorrect or record is not from an auth collection.
+ * GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+ * and writes it to the passed writer.
*/
- validatePassword(password: string): boolean
+ genPowerShellCompletionWithDesc(w: io.Writer): void
}
- interface Record {
+ interface Command {
/**
- * SetPassword sets cryptographically secure string to the auth record "password" field.
- * This method also resets the "lastResetSentAt" and the "tokenKey" fields.
- *
- * Returns an error if the record is not from an auth collection or
- * an empty password is provided.
+ * MarkFlagRequired instructs the various shell completion implementations to
+ * prioritize the named flag when performing completion,
+ * and causes your command to report an error if invoked without the flag.
*/
- setPassword(password: string): void
- }
- /**
- * RequestInfo defines a HTTP request data struct, usually used
- * as part of the `@request.*` filter resolver.
- */
- interface RequestInfo {
- query: _TygojaDict
- data: _TygojaDict
- headers: _TygojaDict
- authRecord?: Record
- admin?: Admin
- method: string
+ markFlagRequired(name: string): void
}
- interface RequestInfo {
+ interface Command {
/**
- * HasModifierDataKeys loosely checks if the current struct has any modifier Data keys.
+ * MarkPersistentFlagRequired instructs the various shell completion implementations to
+ * prioritize the named persistent flag when performing completion,
+ * and causes your command to report an error if invoked without the flag.
*/
- hasModifierDataKeys(): boolean
+ markPersistentFlagRequired(name: string): void
}
-}
-
-/**
- * Package daos handles common PocketBase DB model manipulations.
- *
- * Think of daos as DB repository and service layer in one.
- */
-namespace daos {
- interface Dao {
+ interface Command {
/**
- * AdminQuery returns a new Admin select query.
+ * MarkFlagFilename instructs the various shell completion implementations to
+ * limit completions for the named flag to the specified file extensions.
*/
- adminQuery(): (dbx.SelectQuery)
+ markFlagFilename(name: string, ...extensions: string[]): void
}
- interface Dao {
+ interface Command {
/**
- * FindAdminById finds the admin with the provided id.
+ * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+ * The bash completion script will call the bash function f for the flag.
+ *
+ * This will only work for bash completion.
+ * It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+ * to register a Go function which will work across all shells.
*/
- findAdminById(id: string): (models.Admin)
+ markFlagCustom(name: string, f: string): void
}
- interface Dao {
+ interface Command {
/**
- * FindAdminByEmail finds the admin with the provided email address.
+ * MarkPersistentFlagFilename instructs the various shell completion
+ * implementations to limit completions for the named persistent flag to the
+ * specified file extensions.
*/
- findAdminByEmail(email: string): (models.Admin)
+ markPersistentFlagFilename(name: string, ...extensions: string[]): void
}
- interface Dao {
+ interface Command {
/**
- * FindAdminByToken finds the admin associated with the provided JWT.
- *
- * Returns an error if the JWT is invalid or expired.
+ * MarkFlagDirname instructs the various shell completion implementations to
+ * limit completions for the named flag to directory names.
*/
- findAdminByToken(token: string, baseTokenKey: string): (models.Admin)
+ markFlagDirname(name: string): void
}
- interface Dao {
+ interface Command {
/**
- * TotalAdmins returns the number of existing admin records.
+ * MarkPersistentFlagDirname instructs the various shell completion
+ * implementations to limit completions for the named persistent flag to
+ * directory names.
*/
- totalAdmins(): number
+ markPersistentFlagDirname(name: string): void
}
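
The MarkFlag*/MarkPersistentFlag* helpers likewise just annotate existing flags for the shell completion scripts (and, for the Required variants, for validation). A hedged sketch with the same caveat that the named flags are assumed to exist:

```js
// Illustrative only: completion hints for assumed --config and --output flags.
const cmd = new Command({ use: "import" }) // flags assumed to be defined on it elsewhere

cmd.markFlagRequired("config")                 // prioritized in completion; missing flag errors
cmd.markFlagFilename("config", "json", "yaml") // complete file names with these extensions
cmd.markFlagDirname("output")                  // complete directory names only
```
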
- interface Dao {
+ interface Command {
/**
- * IsAdminEmailUnique checks if the provided email address is not
- * already in use by other admins.
+ * GenZshCompletionFile generates zsh completion file including descriptions.
*/
- isAdminEmailUnique(email: string, ...excludeIds: string[]): boolean
+ genZshCompletionFile(filename: string): void
}
- interface Dao {
+ interface Command {
/**
- * DeleteAdmin deletes the provided Admin model.
- *
- * Returns an error if there is only 1 admin.
+ * GenZshCompletion generates zsh completion file including descriptions
+ * and writes it to the passed writer.
*/
- deleteAdmin(admin: models.Admin): void
+ genZshCompletion(w: io.Writer): void
}
- interface Dao {
+ interface Command {
/**
- * SaveAdmin upserts the provided Admin model.
+ * GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
*/
- saveAdmin(admin: models.Admin): void
+ genZshCompletionFileNoDesc(filename: string): void
}
- /**
- * Dao handles various db operations.
- *
- * You can think of Dao as a repository and service layer in one.
- */
- interface Dao {
- /**
- * MaxLockRetries specifies the default max "database is locked" auto retry attempts.
- */
- maxLockRetries: number
- /**
- * ModelQueryTimeout is the default max duration of a running ModelQuery().
- *
- * This field has no effect if an explicit query context is already specified.
- */
- modelQueryTimeout: time.Duration
+ interface Command {
/**
- * write hooks
+ * GenZshCompletionNoDesc generates zsh completion file without descriptions
+ * and writes it to the passed writer.
*/
- beforeCreateFunc: (eventDao: Dao, m: models.Model, action: () => void) => void
- afterCreateFunc: (eventDao: Dao, m: models.Model) => void
- beforeUpdateFunc: (eventDao: Dao, m: models.Model, action: () => void) => void
- afterUpdateFunc: (eventDao: Dao, m: models.Model) => void
- beforeDeleteFunc: (eventDao: Dao, m: models.Model, action: () => void) => void
- afterDeleteFunc: (eventDao: Dao, m: models.Model) => void
+ genZshCompletionNoDesc(w: io.Writer): void
}
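
The *File completion generators take a target path and can be scripted directly; the writer-based variants take an io.Writer and are more convenient from Go. A hedged sketch for an assumed command instance:

```js
// Illustrative only: writing shell completion scripts to disk.
const cmd = new Command({ use: "tool" })

cmd.genZshCompletionFile("./tool.zsh")                // zsh, with descriptions
cmd.genFishCompletionFile("./tool.fish", true)        // true = include descriptions
cmd.genPowerShellCompletionFileWithDesc("./tool.ps1") // powershell, with descriptions
```
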
- interface Dao {
+ interface Command {
/**
- * DB returns the default dao db builder (*dbx.DB or *dbx.TX).
+ * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
+ * not consistent with Bash completion. It has therefore been disabled.
+ * Instead, when no other completion is specified, file completion is done by
+ * default for every argument. One can disable file completion on a per-argument
+ * basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
+ * To achieve file extension filtering, one can use ValidArgsFunction and
+ * ShellCompDirectiveFilterFileExt.
*
- * Currently the default db builder is dao.concurrentDB but that may change in the future.
+ * Deprecated
*/
- db(): dbx.Builder
+ markZshCompPositionalArgumentFile(argPosition: number, ...patterns: string[]): void
}
- interface Dao {
+ interface Command {
/**
- * ConcurrentDB returns the dao concurrent (aka. multiple open connections)
- * db builder (*dbx.DB or *dbx.TX).
+ * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
+ * been disabled.
+ * To achieve the same behavior across all shells, one can use
+ * ValidArgs (for the first argument only) or ValidArgsFunction for
+ * any argument (can include the first one also).
*
- * In a transaction the concurrentDB and nonconcurrentDB refer to the same *dbx.TX instance.
+ * Deprecated
*/
- concurrentDB(): dbx.Builder
+ markZshCompPositionalArgumentWords(argPosition: number, ...words: string[]): void
}
- interface Dao {
- /**
- * NonconcurrentDB returns the dao nonconcurrent (aka. single open connection)
- * db builder (*dbx.DB or *dbx.TX).
- *
- * In a transaction the concurrentDB and nonconcurrentDB refer to the same *dbx.TX instance.
- */
- nonconcurrentDB(): dbx.Builder
+}
+
+namespace auth {
+ /**
+ * AuthUser defines a standardized oauth2 user data structure.
+ */
+ interface AuthUser {
+ id: string
+ name: string
+ username: string
+ email: string
+ avatarUrl: string
+ accessToken: string
+ refreshToken: string
+ expiry: types.DateTime
+ rawUser: _TygojaDict
}
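
AuthUser is the normalized profile shape every OAuth2 provider maps its response into. A small illustrative consumer; the helper name is made up, but the fields are exactly those declared above:

```js
// Illustrative only: reading the normalized OAuth2 user data.
function describeAuthUser(u) {
    console.log(u.id, u.username, u.email)
    console.log("avatar:", u.avatarUrl, "token expires:", u.expiry)
    // provider-specific payload remains available in rawUser
    return u.rawUser
}
```
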
- interface Dao {
+ /**
+ * Provider defines a common interface for an OAuth2 client.
+ */
+ interface Provider {
+ [key:string]: any;
/**
- * Clone returns a new Dao with the same configuration options as the current one.
+ * Context returns the context associated with the provider (if any).
*/
- clone(): (Dao)
- }
- interface Dao {
+ context(): context.Context
/**
- * WithoutHooks returns a new Dao with the same configuration options
- * as the current one, but without create/update/delete hooks.
+ * SetContext assigns the specified context to the current provider.
*/
- withoutHooks(): (Dao)
- }
- interface Dao {
+ setContext(ctx: context.Context): void
/**
- * ModelQuery creates a new preconfigured select query with preset
- * SELECT, FROM and other common fields based on the provided model.
+ * PKCE indicates whether the provider can use the PKCE flow.
*/
- modelQuery(m: models.Model): (dbx.SelectQuery)
- }
- interface Dao {
+ pkce(): boolean
/**
- * FindById finds a single db record with the specified id and
- * scans the result into m.
+ * SetPKCE toggles whether the provider can use the PKCE flow.
*/
- findById(m: models.Model, id: string): void
- }
- interface Dao {
+ setPKCE(enable: boolean): void
/**
- * RunInTransaction wraps fn into a transaction.
- *
- * It is safe to nest RunInTransaction calls as long as you use the txDao.
+ * DisplayName usually returns the provider name as it is officially written,
+ * and it could be used directly in the UI.
*/
- runInTransaction(fn: (txDao: Dao) => void): void
- }
- interface Dao {
+ displayName(): string
/**
- * Delete deletes the provided model.
+ * SetDisplayName sets the provider's display name.
*/
- delete(m: models.Model): void
- }
- interface Dao {
+ setDisplayName(displayName: string): void
/**
- * Save persists the provided model in the database.
- *
- * If m.IsNew() is true, the method will perform a create, otherwise an update.
- * To explicitly mark a model for update you can use m.MarkAsNotNew().
+ * Scopes returns the provider access permissions that will be requested.
*/
- save(m: models.Model): void
- }
- interface Dao {
+ scopes(): Array<string>
/**
- * CollectionQuery returns a new Collection select query.
+ * SetScopes sets the provider access permissions that will be requested later.
*/
- collectionQuery(): (dbx.SelectQuery)
- }
- interface Dao {
+ setScopes(scopes: Array<string>): void
/**
- * FindCollectionsByType finds all collections by the given type.
+ * ClientId returns the provider client's app ID.
*/
- findCollectionsByType(collectionType: string): Array<(models.Collection | undefined)>
- }
- interface Dao {
+ clientId(): string
/**
- * FindCollectionByNameOrId finds a single collection by its name (case insensitive) or id.
+ * SetClientId sets the provider client's ID.
*/
- findCollectionByNameOrId(nameOrId: string): (models.Collection)
- }
- interface Dao {
+ setClientId(clientId: string): void
/**
- * IsCollectionNameUnique checks that there is no existing collection
- * with the provided name (case insensitive!).
- *
- * Note: case insensitive check because the name is used also as a table name for the records.
+ * ClientSecret returns the provider client's app secret.
*/
- isCollectionNameUnique(name: string, ...excludeIds: string[]): boolean
- }
- interface Dao {
+ clientSecret(): string
/**
- * FindCollectionReferences returns information for all
- * relation schema fields referencing the provided collection.
- *
- * If the provided collection has reference to itself then it will be
- * also included in the result. To exclude it, pass the collection id
- * as the excludeId argument.
+ * SetClientSecret sets the provider client's app secret.
*/
- findCollectionReferences(collection: models.Collection, ...excludeIds: string[]): _TygojaDict
- }
- interface Dao {
+ setClientSecret(secret: string): void
/**
- * DeleteCollection deletes the provided Collection model.
- * This method automatically deletes the related collection records table.
- *
- * NB! The collection cannot be deleted, if:
- * - is system collection (aka. collection.System is true)
- * - is referenced as part of a relation field in another collection
+ * RedirectUrl returns the end address to redirect the user
+ * going through the OAuth flow.
*/
- deleteCollection(collection: models.Collection): void
- }
- interface Dao {
+ redirectUrl(): string
/**
- * SaveCollection persists the provided Collection model and updates
- * its related records table schema.
- *
- * If collecction.IsNew() is true, the method will perform a create, otherwise an update.
- * To explicitly mark a collection for update you can use collecction.MarkAsNotNew().
+ * SetRedirectUrl sets the provider's RedirectUrl.
*/
- saveCollection(collection: models.Collection): void
- }
- interface Dao {
+ setRedirectUrl(url: string): void
/**
- * ImportCollections imports the provided collections list within a single transaction.
- *
- * NB1! If deleteMissing is set, all local collections and schema fields, that are not present
- * in the imported configuration, WILL BE DELETED (including their related records data).
- *
- * NB2! This method doesn't perform validations on the imported collections data!
- * If you need validations, use [forms.CollectionsImport].
+ * AuthUrl returns the provider's authorization service url.
*/
- importCollections(importedCollections: Array<(models.Collection | undefined)>, deleteMissing: boolean, afterSync: (txDao: Dao, mappedImported: _TygojaDict) => void): void
- }
- interface Dao {
+ authUrl(): string
/**
- * ExternalAuthQuery returns a new ExternalAuth select query.
+ * SetAuthUrl sets the provider's AuthUrl.
*/
- externalAuthQuery(): (dbx.SelectQuery)
- }
- interface Dao {
+ setAuthUrl(url: string): void
/**
- * FindAllExternalAuthsByRecord returns all ExternalAuth models
- * linked to the provided auth record.
+ * TokenUrl returns the provider's token exchange service url.
*/
- findAllExternalAuthsByRecord(authRecord: models.Record): Array<(models.ExternalAuth | undefined)>
- }
- interface Dao {
+ tokenUrl(): string
/**
- * FindExternalAuthByRecordAndProvider returns the first available
- * ExternalAuth model for the specified record data and provider.
+ * SetTokenUrl sets the provider's TokenUrl.
*/
- findExternalAuthByRecordAndProvider(authRecord: models.Record, provider: string): (models.ExternalAuth)
- }
- interface Dao {
+ setTokenUrl(url: string): void
/**
- * FindFirstExternalAuthByExpr returns the first available
- * ExternalAuth model that satisfies the non-nil expression.
+ * UserApiUrl returns the provider's user info api url.
*/
- findFirstExternalAuthByExpr(expr: dbx.Expression): (models.ExternalAuth)
- }
- interface Dao {
+ userApiUrl(): string
/**
- * SaveExternalAuth upserts the provided ExternalAuth model.
+ * SetUserApiUrl sets the provider's UserApiUrl.
*/
- saveExternalAuth(model: models.ExternalAuth): void
- }
- interface Dao {
+ setUserApiUrl(url: string): void
/**
- * DeleteExternalAuth deletes the provided ExternalAuth model.
+ * Client returns an http client using the provided token.
*/
- deleteExternalAuth(model: models.ExternalAuth): void
- }
- interface Dao {
+ client(token: oauth2.Token): (any)
/**
- * LogQuery returns a new Log select query.
+ * BuildAuthUrl returns a URL to the provider's consent page
+ * that asks for permissions for the required scopes explicitly.
*/
- logQuery(): (dbx.SelectQuery)
- }
- interface Dao {
+ buildAuthUrl(state: string, ...opts: oauth2.AuthCodeOption[]): string
/**
- * FindLogById finds a single Log entry by its id.
+ * FetchToken converts an authorization code to token.
*/
- findLogById(id: string): (models.Log)
- }
- interface Dao {
+ fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token)
/**
- * LogsStats returns hourly grouped requests logs statistics.
+ * FetchRawUserData requests and marshalizes into `result` the
+ * OAuth user api response.
*/
- logsStats(expr: dbx.Expression): Array<(LogsStatsItem | undefined)>
- }
- interface Dao {
+ fetchRawUserData(token: oauth2.Token): string|Array<number>
/**
- * DeleteOldLogs delete all requests that are created before createdBefore.
+ * FetchAuthUser is similar to FetchRawUserData, but normalizes and
+ * marshalizes the user api response into a standardized AuthUser struct.
*/
- deleteOldLogs(createdBefore: time.Time): void
+ fetchAuthUser(token: oauth2.Token): (AuthUser)
}
- interface Dao {
+}
+
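+/**
+ * Illustrative sketch (not part of the generated declarations) of the
+ * typical auth.Provider flow declared above. How a concrete `provider`
+ * instance is obtained is an assumption of this example and is not
+ * covered by these typings.
+ *
+ * ```
+ * provider.setClientId("CLIENT_ID")
+ * provider.setClientSecret("CLIENT_SECRET")
+ * provider.setRedirectUrl("https://example.com/oauth2-redirect")
+ *
+ * // 1. send the user to the provider's consent page
+ * const consentUrl = provider.buildAuthUrl("STATE")
+ *
+ * // 2. exchange the received authorization code for a token
+ * const token = provider.fetchToken("AUTH_CODE")
+ *
+ * // 3. normalize the user api response into an AuthUser
+ * const authUser = provider.fetchAuthUser(token)
+ * ```
+ */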
+namespace settings {
+ // @ts-ignore
+ import validation = ozzo_validation
+ /**
+ * Settings defines common app configuration options.
+ */
+ interface Settings {
+ meta: MetaConfig
+ logs: LogsConfig
+ smtp: SmtpConfig
+ s3: S3Config
+ backups: BackupsConfig
+ adminAuthToken: TokenConfig
+ adminPasswordResetToken: TokenConfig
+ adminFileToken: TokenConfig
+ recordAuthToken: TokenConfig
+ recordPasswordResetToken: TokenConfig
+ recordEmailChangeToken: TokenConfig
+ recordVerificationToken: TokenConfig
+ recordFileToken: TokenConfig
/**
- * SaveLog upserts the provided Log model.
+ * Deprecated: Will be removed in v0.9+
*/
- saveLog(log: models.Log): void
+ emailAuth: EmailAuthConfig
+ googleAuth: AuthProviderConfig
+ facebookAuth: AuthProviderConfig
+ githubAuth: AuthProviderConfig
+ gitlabAuth: AuthProviderConfig
+ discordAuth: AuthProviderConfig
+ twitterAuth: AuthProviderConfig
+ microsoftAuth: AuthProviderConfig
+ spotifyAuth: AuthProviderConfig
+ kakaoAuth: AuthProviderConfig
+ twitchAuth: AuthProviderConfig
+ stravaAuth: AuthProviderConfig
+ giteeAuth: AuthProviderConfig
+ livechatAuth: AuthProviderConfig
+ giteaAuth: AuthProviderConfig
+ oidcAuth: AuthProviderConfig
+ oidc2Auth: AuthProviderConfig
+ oidc3Auth: AuthProviderConfig
+ appleAuth: AuthProviderConfig
+ instagramAuth: AuthProviderConfig
+ vkAuth: AuthProviderConfig
+ yandexAuth: AuthProviderConfig
+ patreonAuth: AuthProviderConfig
+ mailcowAuth: AuthProviderConfig
+ bitbucketAuth: AuthProviderConfig
+ planningcenterAuth: AuthProviderConfig
}
- interface Dao {
+ interface Settings {
/**
- * ParamQuery returns a new Param select query.
+ * Validate makes Settings validatable by implementing [validation.Validatable] interface.
*/
- paramQuery(): (dbx.SelectQuery)
+ validate(): void
}
- interface Dao {
+ interface Settings {
/**
- * FindParamByKey finds the first Param model with the provided key.
+ * Merge merges `other` settings into the current one.
*/
- findParamByKey(key: string): (models.Param)
+ merge(other: Settings): void
}
- interface Dao {
+ interface Settings {
/**
- * SaveParam creates or updates a Param model by the provided key-value pair.
- * The value argument will be encoded as json string.
- *
- * If `optEncryptionKey` is provided it will encrypt the value before storing it.
+ * Clone creates a new deep copy of the current settings.
*/
- saveParam(key: string, value: any, ...optEncryptionKey: string[]): void
+ clone(): (Settings)
}
- interface Dao {
+ interface Settings {
/**
- * DeleteParam deletes the provided Param model.
- */
- deleteParam(param: models.Param): void
+ * RedactClone creates a new deep copy of the current settings,
+ * while replacing the secret values with `******`.
+ */
+ redactClone(): (Settings)
}
- interface Dao {
+ interface Settings {
/**
- * RecordQuery returns a new Record select query from a collection model, id or name.
- *
- * In case a collection id or name is provided and that collection doesn't
- * actually exists, the generated query will be created with a cancelled context
- * and will fail once an executor (Row(), One(), All(), etc.) is called.
+ * NamedAuthProviderConfigs returns a map with all registered OAuth2
+ * provider configurations (indexed by their name identifier).
*/
- recordQuery(collectionModelOrIdentifier: any): (dbx.SelectQuery)
+ namedAuthProviderConfigs(): _TygojaDict
}
+}
+
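+/**
+ * Illustrative sketch (not part of the generated declarations): reading the
+ * loaded app settings. It assumes the JSVM exposes the app instance as the
+ * `$app` global (see the core.App typings further below).
+ *
+ * ```
+ * const settings = $app.settings()
+ *
+ * // deep copy with the secret values replaced by "******",
+ * // eg. for safe logging or returning to a client
+ * const redacted = settings.redactClone()
+ *
+ * // raises an error on invalid configuration
+ * settings.validate()
+ * ```
+ */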
+/**
+ * Package daos handles common PocketBase DB model manipulations.
+ *
+ * Think of daos as DB repository and service layer in one.
+ */
+namespace daos {
interface Dao {
/**
- * FindRecordById finds the Record model by its id.
+ * AdminQuery returns a new Admin select query.
*/
- findRecordById(collectionNameOrId: string, recordId: string, ...optFilters: ((q: dbx.SelectQuery) => void)[]): (models.Record)
+ adminQuery(): (dbx.SelectQuery)
}
interface Dao {
/**
- * FindRecordsByIds finds all Record models by the provided ids.
- * If no records are found, returns an empty slice.
+ * FindAdminById finds the admin with the provided id.
*/
- findRecordsByIds(collectionNameOrId: string, recordIds: Array, ...optFilters: ((q: dbx.SelectQuery) => void)[]): Array<(models.Record | undefined)>
+ findAdminById(id: string): (models.Admin)
}
interface Dao {
/**
- * @todo consider to depricate as it may be easier to just use dao.RecordQuery()
- *
- * FindRecordsByExpr finds all records by the specified db expression.
- *
- * Returns all collection records if no expressions are provided.
- *
- * Returns an empty slice if no records are found.
- *
- * Example:
- *
- * ```
- * expr1 := dbx.HashExp{"email": "test@example.com"}
- * expr2 := dbx.NewExp("LOWER(username) = {:username}", dbx.Params{"username": "test"})
- * dao.FindRecordsByExpr("example", expr1, expr2)
- * ```
+ * FindAdminByEmail finds the admin with the provided email address.
*/
- findRecordsByExpr(collectionNameOrId: string, ...exprs: dbx.Expression[]): Array<(models.Record | undefined)>
+ findAdminByEmail(email: string): (models.Admin)
}
interface Dao {
/**
- * FindFirstRecordByData returns the first found record matching
- * the provided key-value pair.
+ * FindAdminByToken finds the admin associated with the provided JWT.
+ *
+ * Returns an error if the JWT is invalid or expired.
*/
- findFirstRecordByData(collectionNameOrId: string, key: string, value: any): (models.Record)
+ findAdminByToken(token: string, baseTokenKey: string): (models.Admin)
}
interface Dao {
/**
- * FindRecordsByFilter returns limit number of records matching the
- * provided string filter.
- *
- * NB! Use the last "params" argument to bind untrusted user variables!
- *
- * The sort argument is optional and can be empty string OR the same format
- * used in the web APIs, eg. "-created,title".
- *
- * If the limit argument is <= 0, no limit is applied to the query and
- * all matching records are returned.
- *
- * Example:
- *
- * ```
- * dao.FindRecordsByFilter(
- * "posts",
- * "title ~ {:title} && visible = {:visible}",
- * "-created",
- * 10,
- * 0,
- * dbx.Params{"title": "lorem ipsum", "visible": true}
- * )
- * ```
+ * TotalAdmins returns the number of existing admin records.
*/
- findRecordsByFilter(collectionNameOrId: string, filter: string, sort: string, limit: number, offset: number, ...params: dbx.Params[]): Array<(models.Record | undefined)>
+ totalAdmins(): number
}
interface Dao {
/**
- * FindFirstRecordByFilter returns the first available record matching the provided filter.
- *
- * NB! Use the last params argument to bind untrusted user variables!
- *
- * Example:
- *
- * ```
- * dao.FindFirstRecordByFilter("posts", "slug={:slug} && status='public'", dbx.Params{"slug": "test"})
- * ```
+ * IsAdminEmailUnique checks if the provided email address is not
+ * already in use by other admins.
*/
- findFirstRecordByFilter(collectionNameOrId: string, filter: string, ...params: dbx.Params[]): (models.Record)
+ isAdminEmailUnique(email: string, ...excludeIds: string[]): boolean
}
interface Dao {
/**
- * IsRecordValueUnique checks if the provided key-value pair is a unique Record value.
- *
- * For correctness, if the collection is "auth" and the key is "username",
- * the unique check will be case insensitive.
+ * DeleteAdmin deletes the provided Admin model.
*
- * NB! Array values (eg. from multiple select fields) are matched
- * as a serialized json strings (eg. `["a","b"]`), so the value uniqueness
- * depends on the elements order. Or in other words the following values
- * are considered different: `[]string{"a","b"}` and `[]string{"b","a"}`
+ * Returns an error if there is only 1 admin.
*/
- isRecordValueUnique(collectionNameOrId: string, key: string, value: any, ...excludeIds: string[]): boolean
+ deleteAdmin(admin: models.Admin): void
}
interface Dao {
/**
- * FindAuthRecordByToken finds the auth record associated with the provided JWT.
- *
- * Returns an error if the JWT is invalid, expired or not associated to an auth collection record.
+ * SaveAdmin upserts the provided Admin model.
*/
- findAuthRecordByToken(token: string, baseTokenKey: string): (models.Record)
+ saveAdmin(admin: models.Admin): void
}
+ /**
+ * Dao handles various db operations.
+ *
+ * You can think of Dao as a repository and service layer in one.
+ */
interface Dao {
/**
- * FindAuthRecordByEmail finds the auth record associated with the provided email.
+ * MaxLockRetries specifies the default max "database is locked" auto retry attempts.
+ */
+ maxLockRetries: number
+ /**
+ * ModelQueryTimeout is the default max duration of a running ModelQuery().
*
- * Returns an error if it is not an auth collection or the record is not found.
+ * This field has no effect if an explicit query context is already specified.
*/
- findAuthRecordByEmail(collectionNameOrId: string, email: string): (models.Record)
+ modelQueryTimeout: time.Duration
+ /**
+ * write hooks
+ */
+ beforeCreateFunc: (eventDao: Dao, m: models.Model, action: () => void) => void
+ afterCreateFunc: (eventDao: Dao, m: models.Model) => void
+ beforeUpdateFunc: (eventDao: Dao, m: models.Model, action: () => void) => void
+ afterUpdateFunc: (eventDao: Dao, m: models.Model) => void
+ beforeDeleteFunc: (eventDao: Dao, m: models.Model, action: () => void) => void
+ afterDeleteFunc: (eventDao: Dao, m: models.Model) => void
}
interface Dao {
/**
- * FindAuthRecordByUsername finds the auth record associated with the provided username (case insensitive).
+ * DB returns the default dao db builder (*dbx.DB or *dbx.TX).
*
- * Returns an error if it is not an auth collection or the record is not found.
+ * Currently the default db builder is dao.concurrentDB but that may change in the future.
*/
- findAuthRecordByUsername(collectionNameOrId: string, username: string): (models.Record)
+ db(): dbx.Builder
}
interface Dao {
/**
- * SuggestUniqueAuthRecordUsername checks if the provided username is unique
- * and return a new "unique" username with appended random numeric part
- * (eg. "existingName" -> "existingName583").
+ * ConcurrentDB returns the dao concurrent (aka. multiple open connections)
+ * db builder (*dbx.DB or *dbx.TX).
*
- * The same username will be returned if the provided string is already unique.
+ * In a transaction the concurrentDB and nonconcurrentDB refer to the same *dbx.TX instance.
*/
- suggestUniqueAuthRecordUsername(collectionNameOrId: string, baseUsername: string, ...excludeIds: string[]): string
+ concurrentDB(): dbx.Builder
}
interface Dao {
/**
- * CanAccessRecord checks if a record is allowed to be accessed by the
- * specified requestInfo and accessRule.
- *
- * Rule and db checks are ignored in case requestInfo.Admin is set.
- *
- * The returned error indicate that something unexpected happened during
- * the check (eg. invalid rule or db error).
- *
- * The method always return false on invalid access rule or db error.
- *
- * Example:
- *
- * ```
- * requestInfo := apis.RequestInfo(c /* echo.Context *\/)
- * record, _ := dao.FindRecordById("example", "RECORD_ID")
- * rule := types.Pointer("@request.auth.id != '' || status = 'public'")
- * // ... or use one of the record collection's rule, eg. record.Collection().ViewRule
+ * NonconcurrentDB returns the dao nonconcurrent (aka. single open connection)
+ * db builder (*dbx.DB or *dbx.TX).
*
- * if ok, _ := dao.CanAccessRecord(record, requestInfo, rule); ok { ... }
- * ```
+ * In a transaction the concurrentDB and nonconcurrentDB refer to the same *dbx.TX instance.
*/
- canAccessRecord(record: models.Record, requestInfo: models.RequestInfo, accessRule: string): boolean
+ nonconcurrentDB(): dbx.Builder
}
interface Dao {
/**
- * SaveRecord persists the provided Record model in the database.
- *
- * If record.IsNew() is true, the method will perform a create, otherwise an update.
- * To explicitly mark a record for update you can use record.MarkAsNotNew().
+ * Clone returns a new Dao with the same configuration options as the current one.
*/
- saveRecord(record: models.Record): void
+ clone(): (Dao)
}
interface Dao {
/**
- * DeleteRecord deletes the provided Record model.
- *
- * This method will also cascade the delete operation to all linked
- * relational records (delete or unset, depending on the rel settings).
- *
- * The delete operation may fail if the record is part of a required
- * reference in another record (aka. cannot be deleted or unset).
+ * WithoutHooks returns a new Dao with the same configuration options
+ * as the current one, but without create/update/delete hooks.
*/
- deleteRecord(record: models.Record): void
+ withoutHooks(): (Dao)
}
interface Dao {
/**
- * ExpandRecord expands the relations of a single Record model.
- *
- * If optFetchFunc is not set, then a default function will be used
- * that returns all relation records.
- *
- * Returns a map with the failed expand parameters and their errors.
+ * ModelQuery creates a new preconfigured select query with preset
+ * SELECT, FROM and other common fields based on the provided model.
*/
- expandRecord(record: models.Record, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ modelQuery(m: models.Model): (dbx.SelectQuery)
}
interface Dao {
/**
- * ExpandRecords expands the relations of the provided Record models list.
- *
- * If optFetchFunc is not set, then a default function will be used
- * that returns all relation records.
- *
- * Returns a map with the failed expand parameters and their errors.
+ * FindById finds a single db record with the specified id and
+ * scans the result into m.
*/
- expandRecords(records: Array<(models.Record | undefined)>, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ findById(m: models.Model, id: string): void
}
- // @ts-ignore
- import validation = ozzo_validation
interface Dao {
/**
- * SyncRecordTableSchema compares the two provided collections
- * and applies the necessary related record table changes.
+ * RunInTransaction wraps fn into a transaction.
*
- * If `oldCollection` is null, then only `newCollection` is used to create the record table.
+ * It is safe to nest RunInTransaction calls as long as you use the txDao.
*/
- syncRecordTableSchema(newCollection: models.Collection, oldCollection: models.Collection): void
+ runInTransaction(fn: (txDao: Dao) => void): void
}
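+ /**
+ * Illustrative sketch (not part of the generated declarations): wrapping
+ * several writes in a single transaction. The `$app` global, the "posts"
+ * collection and the record id are assumptions of this example; inside
+ * the callback always use the provided txDao.
+ *
+ * ```
+ * $app.dao().runInTransaction((txDao) => {
+ *     const record = txDao.findRecordById("posts", "RECORD_ID")
+ *
+ *     // ... modify the record ...
+ *
+ *     txDao.saveRecord(record)
+ *
+ *     // a thrown error is expected to roll the transaction back
+ * })
+ * ```
+ */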
interface Dao {
/**
- * FindSettings returns and decode the serialized app settings param value.
- *
- * The method will first try to decode the param value without decryption.
- * If it fails and optEncryptionKey is set, it will try again by first
- * decrypting the value and then decode it again.
- *
- * Returns an error if it fails to decode the stored serialized param value.
+ * Delete deletes the provided model.
*/
- findSettings(...optEncryptionKey: string[]): (settings.Settings)
+ delete(m: models.Model): void
}
interface Dao {
/**
- * SaveSettings persists the specified settings configuration.
+ * Save persists the provided model in the database.
*
- * If optEncryptionKey is set, then the stored serialized value will be encrypted with it.
+ * If m.IsNew() is true, the method will perform a create, otherwise an update.
+ * To explicitly mark a model for update you can use m.MarkAsNotNew().
*/
- saveSettings(newSettings: settings.Settings, ...optEncryptionKey: string[]): void
+ save(m: models.Model): void
}
interface Dao {
/**
- * HasTable checks if a table (or view) with the provided name exists (case insensitive).
+ * CollectionQuery returns a new Collection select query.
*/
- hasTable(tableName: string): boolean
+ collectionQuery(): (dbx.SelectQuery)
}
interface Dao {
/**
- * TableColumns returns all column names of a single table by its name.
+ * FindCollectionsByType finds all collections by the given type.
*/
- tableColumns(tableName: string): Array
+ findCollectionsByType(collectionType: string): Array<(models.Collection | undefined)>
}
interface Dao {
/**
- * TableInfo returns the `table_info` pragma result for the specified table.
+ * FindCollectionByNameOrId finds a single collection by its name (case insensitive) or id.
*/
- tableInfo(tableName: string): Array<(models.TableInfoRow | undefined)>
+ findCollectionByNameOrId(nameOrId: string): (models.Collection)
}
interface Dao {
/**
- * TableIndexes returns a name grouped map with all non empty index of the specified table.
+ * IsCollectionNameUnique checks that there is no existing collection
+ * with the provided name (case insensitive!).
*
- * Note: This method doesn't return an error on nonexisting table.
+ * Note: the check is case insensitive because the name is also used as a table name for the records.
*/
- tableIndexes(tableName: string): _TygojaDict
+ isCollectionNameUnique(name: string, ...excludeIds: string[]): boolean
}
interface Dao {
/**
- * DeleteTable drops the specified table.
- *
- * This method is a no-op if a table with the provided name doesn't exist.
+ * FindCollectionReferences returns information for all
+ * relation schema fields referencing the provided collection.
*
- * Be aware that this method is vulnerable to SQL injection and the
- * "tableName" argument must come only from trusted input!
+ * If the provided collection has a reference to itself then it will also
+ * be included in the result. To exclude it, pass the collection id
+ * as the excludeId argument.
*/
- deleteTable(tableName: string): void
+ findCollectionReferences(collection: models.Collection, ...excludeIds: string[]): _TygojaDict
}
interface Dao {
/**
- * Vacuum executes VACUUM on the current dao.DB() instance in order to
- * reclaim unused db disk space.
+ * DeleteCollection deletes the provided Collection model.
+ * This method automatically deletes the related collection records table.
+ *
+ * NB! The collection cannot be deleted if:
+ * - it is a system collection (aka. collection.System is true)
+ * - it is referenced as part of a relation field in another collection
*/
- vacuum(): void
+ deleteCollection(collection: models.Collection): void
}
interface Dao {
/**
- * DeleteView drops the specified view name.
- *
- * This method is a no-op if a view with the provided name doesn't exist.
+ * SaveCollection persists the provided Collection model and updates
+ * its related records table schema.
*
- * Be aware that this method is vulnerable to SQL injection and the
- * "name" argument must come only from trusted input!
+ * If collection.IsNew() is true, the method will perform a create, otherwise an update.
+ * To explicitly mark a collection for update you can use collection.MarkAsNotNew().
*/
- deleteView(name: string): void
+ saveCollection(collection: models.Collection): void
}
interface Dao {
/**
- * SaveView creates (or updates already existing) persistent SQL view.
+ * ImportCollections imports the provided collections list within a single transaction.
*
- * Be aware that this method is vulnerable to SQL injection and the
- * "selectQuery" argument must come only from trusted input!
+ * NB1! If deleteMissing is set, all local collections and schema fields that are not present
+ * in the imported configuration WILL BE DELETED (including their related records data).
+ *
+ * NB2! This method doesn't perform validations on the imported collections data!
+ * If you need validations, use [forms.CollectionsImport].
*/
- saveView(name: string, selectQuery: string): void
+ importCollections(importedCollections: Array<(models.Collection | undefined)>, deleteMissing: boolean, afterSync: (txDao: Dao, mappedImported: _TygojaDict, mappedExisting: _TygojaDict) => void): void
}
interface Dao {
/**
- * CreateViewSchema creates a new view schema from the provided select query.
- *
- * There are some caveats:
- * - The select query must have an "id" column.
- * - Wildcard ("*") columns are not supported to avoid accidentally leaking sensitive data.
+ * ExternalAuthQuery returns a new ExternalAuth select query.
*/
- createViewSchema(selectQuery: string): schema.Schema
+ externalAuthQuery(): (dbx.SelectQuery)
}
interface Dao {
/**
- * FindRecordByViewFile returns the original models.Record of the
- * provided view collection file.
+ * FindAllExternalAuthsByRecord returns all ExternalAuth models
+ * linked to the provided auth record.
*/
- findRecordByViewFile(viewCollectionNameOrId: string, fileFieldName: string, filename: string): (models.Record)
+ findAllExternalAuthsByRecord(authRecord: models.Record): Array<(models.ExternalAuth | undefined)>
}
-}
-
-/**
- * Package core is the backbone of PocketBase.
- *
- * It defines the main PocketBase App interface and its base implementation.
- */
-namespace core {
- /**
- * App defines the main PocketBase app interface.
- */
- interface App {
- [key:string]: any;
- /**
- * Deprecated:
- * This method may get removed in the near future.
- * It is recommended to access the app db instance from app.Dao().DB() or
- * if you want more flexibility - app.Dao().ConcurrentDB() and app.Dao().NonconcurrentDB().
- *
- * DB returns the default app database instance.
- */
- db(): (dbx.DB)
+ interface Dao {
/**
- * Dao returns the default app Dao instance.
- *
- * This Dao could operate only on the tables and models
- * associated with the default app database. For example,
- * trying to access the request logs table will result in error.
+ * FindExternalAuthByRecordAndProvider returns the first available
+ * ExternalAuth model for the specified record data and provider.
*/
- dao(): (daos.Dao)
+ findExternalAuthByRecordAndProvider(authRecord: models.Record, provider: string): (models.ExternalAuth)
+ }
+ interface Dao {
/**
- * Deprecated:
- * This method may get removed in the near future.
- * It is recommended to access the logs db instance from app.LogsDao().DB() or
- * if you want more flexibility - app.LogsDao().ConcurrentDB() and app.LogsDao().NonconcurrentDB().
- *
- * LogsDB returns the app logs database instance.
+ * FindFirstExternalAuthByExpr returns the first available
+ * ExternalAuth model that satisfies the non-nil expression.
*/
- logsDB(): (dbx.DB)
+ findFirstExternalAuthByExpr(expr: dbx.Expression): (models.ExternalAuth)
+ }
+ interface Dao {
/**
- * LogsDao returns the app logs Dao instance.
- *
- * This Dao could operate only on the tables and models
- * associated with the logs database. For example, trying to access
- * the users table from LogsDao will result in error.
+ * SaveExternalAuth upserts the provided ExternalAuth model.
*/
- logsDao(): (daos.Dao)
+ saveExternalAuth(model: models.ExternalAuth): void
+ }
+ interface Dao {
/**
- * Logger returns the active app logger.
+ * DeleteExternalAuth deletes the provided ExternalAuth model.
*/
- logger(): (slog.Logger)
+ deleteExternalAuth(model: models.ExternalAuth): void
+ }
+ interface Dao {
/**
- * DataDir returns the app data directory path.
+ * LogQuery returns a new Log select query.
*/
- dataDir(): string
+ logQuery(): (dbx.SelectQuery)
+ }
+ interface Dao {
/**
- * EncryptionEnv returns the name of the app secret env key
- * (used for settings encryption).
+ * FindLogById finds a single Log entry by its id.
*/
- encryptionEnv(): string
+ findLogById(id: string): (models.Log)
+ }
+ interface Dao {
/**
- * IsDev returns whether the app is in dev mode.
+ * LogsStats returns hourly grouped request logs statistics.
*/
- isDev(): boolean
+ logsStats(expr: dbx.Expression): Array<(LogsStatsItem | undefined)>
+ }
+ interface Dao {
/**
- * Settings returns the loaded app settings.
+ * DeleteOldLogs deletes all requests that are created before createdBefore.
*/
- settings(): (settings.Settings)
+ deleteOldLogs(createdBefore: time.Time): void
+ }
+ interface Dao {
/**
- * Deprecated: Use app.Store() instead.
+ * SaveLog upserts the provided Log model.
*/
- cache(): (store.Store)
+ saveLog(log: models.Log): void
+ }
+ interface Dao {
/**
- * Store returns the app runtime store.
+ * ParamQuery returns a new Param select query.
*/
- store(): (store.Store)
+ paramQuery(): (dbx.SelectQuery)
+ }
+ interface Dao {
/**
- * SubscriptionsBroker returns the app realtime subscriptions broker instance.
+ * FindParamByKey finds the first Param model with the provided key.
*/
- subscriptionsBroker(): (subscriptions.Broker)
+ findParamByKey(key: string): (models.Param)
+ }
+ interface Dao {
/**
- * NewMailClient creates and returns a configured app mail client.
+ * SaveParam creates or updates a Param model by the provided key-value pair.
+ * The value argument will be encoded as a json string.
+ *
+ * If `optEncryptionKey` is provided it will encrypt the value before storing it.
*/
- newMailClient(): mailer.Mailer
+ saveParam(key: string, value: any, ...optEncryptionKey: string[]): void
+ }
+ interface Dao {
/**
- * NewFilesystem creates and returns a configured filesystem.System instance
- * for managing regular app files (eg. collection uploads).
- *
- * NB! Make sure to call Close() on the returned result
- * after you are done working with it.
+ * DeleteParam deletes the provided Param model.
*/
- newFilesystem(): (filesystem.System)
+ deleteParam(param: models.Param): void
+ }
+ interface Dao {
/**
- * NewBackupsFilesystem creates and returns a configured filesystem.System instance
- * for managing app backups.
+ * RecordQuery returns a new Record select query from a collection model, id or name.
*
- * NB! Make sure to call Close() on the returned result
- * after you are done working with it.
+ * In case a collection id or name is provided and that collection doesn't
+ * actually exists, the generated query will be created with a cancelled context
+ * and will fail once an executor (Row(), One(), All(), etc.) is called.
*/
- newBackupsFilesystem(): (filesystem.System)
+ recordQuery(collectionModelOrIdentifier: any): (dbx.SelectQuery)
+ }
+ interface Dao {
/**
- * RefreshSettings reinitializes and reloads the stored application settings.
+ * FindRecordById finds the Record model by its id.
*/
- refreshSettings(): void
+ findRecordById(collectionNameOrId: string, recordId: string, ...optFilters: ((q: dbx.SelectQuery) => void)[]): (models.Record)
+ }
+ interface Dao {
/**
- * IsBootstrapped checks if the application was initialized
- * (aka. whether Bootstrap() was called).
+ * FindRecordsByIds finds all Record models by the provided ids.
+ * If no records are found, returns an empty slice.
*/
- isBootstrapped(): boolean
+ findRecordsByIds(collectionNameOrId: string, recordIds: Array<string>, ...optFilters: ((q: dbx.SelectQuery) => void)[]): Array<(models.Record | undefined)>
+ }
+ interface Dao {
/**
- * Bootstrap takes care for initializing the application
- * (open db connections, load settings, etc.).
+ * FindRecordsByExpr finds all records by the specified db expression.
*
- * It will call ResetBootstrapState() if the application was already bootstrapped.
+ * Returns all collection records if no expressions are provided.
+ *
+ * Returns an empty slice if no records are found.
+ *
+ * Example:
+ *
+ * ```
+ * expr1 := dbx.HashExp{"email": "test@example.com"}
+ * expr2 := dbx.NewExp("LOWER(username) = {:username}", dbx.Params{"username": "test"})
+ * dao.FindRecordsByExpr("example", expr1, expr2)
+ * ```
*/
- bootstrap(): void
+ findRecordsByExpr(collectionNameOrId: string, ...exprs: dbx.Expression[]): Array<(models.Record | undefined)>
+ }
+ interface Dao {
/**
- * ResetBootstrapState takes care for releasing initialized app resources
- * (eg. closing db connections).
+ * FindFirstRecordByData returns the first found record matching
+ * the provided key-value pair.
*/
- resetBootstrapState(): void
+ findFirstRecordByData(collectionNameOrId: string, key: string, value: any): (models.Record)
+ }
+ interface Dao {
/**
- * CreateBackup creates a new backup of the current app pb_data directory.
+ * FindRecordsByFilter returns at most "limit" records matching the
+ * provided string filter.
*
- * Backups can be stored on S3 if it is configured in app.Settings().Backups.
+ * NB! Use the last "params" argument to bind untrusted user variables!
*
- * Please refer to the godoc of the specific CoreApp implementation
- * for details on the backup procedures.
- */
- createBackup(ctx: context.Context, name: string): void
- /**
- * RestoreBackup restores the backup with the specified name and restarts
- * the current running application process.
+ * The sort argument is optional and can be empty string OR the same format
+ * used in the web APIs, eg. "-created,title".
*
- * The safely perform the restore it is recommended to have free disk space
- * for at least 2x the size of the restored pb_data backup.
+ * If the limit argument is <= 0, no limit is applied to the query and
+ * all matching records are returned.
*
- * Please refer to the godoc of the specific CoreApp implementation
- * for details on the restore procedures.
+ * Example:
*
- * NB! This feature is experimental and currently is expected to work only on UNIX based systems.
+ * ```
+ * dao.FindRecordsByFilter(
+ * "posts",
+ * "title ~ {:title} && visible = {:visible}",
+ * "-created",
+ * 10,
+ * 0,
+ * dbx.Params{"title": "lorem ipsum", "visible": true}
+ * )
+ * ```
*/
- restoreBackup(ctx: context.Context, name: string): void
+ findRecordsByFilter(collectionNameOrId: string, filter: string, sort: string, limit: number, offset: number, ...params: dbx.Params[]): Array<(models.Record | undefined)>
+ }
+ interface Dao {
/**
- * Restart restarts the current running application process.
+ * FindFirstRecordByFilter returns the first available record matching the provided filter.
*
- * Currently it is relying on execve so it is supported only on UNIX based systems.
+ * NB! Use the last params argument to bind untrusted user variables!
+ *
+ * Example:
+ *
+ * ```
+ * dao.FindFirstRecordByFilter("posts", "slug={:slug} && status='public'", dbx.Params{"slug": "test"})
+ * ```
*/
- restart(): void
+ findFirstRecordByFilter(collectionNameOrId: string, filter: string, ...params: dbx.Params[]): (models.Record)
+ }
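+ /**
+ * Illustrative JS equivalents (not part of the generated declarations) of
+ * the Go examples above, assuming a `$app` global and a "posts" collection;
+ * the trailing dbx.Params argument is written as a plain object, which is
+ * also an assumption of this sketch.
+ *
+ * ```
+ * const records = $app.dao().findRecordsByFilter(
+ *     "posts",
+ *     "title ~ {:title} && visible = {:visible}",
+ *     "-created",
+ *     10,
+ *     0,
+ *     { "title": "lorem ipsum", "visible": true },
+ * )
+ *
+ * const record = $app.dao().findFirstRecordByFilter(
+ *     "posts",
+ *     "slug = {:slug} && status = 'public'",
+ *     { "slug": "test" },
+ * )
+ * ```
+ */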
+ interface Dao {
/**
- * OnBeforeBootstrap hook is triggered before initializing the main
- * application resources (eg. before db open and initial settings load).
+ * IsRecordValueUnique checks if the provided key-value pair is a unique Record value.
+ *
+ * For correctness, if the collection is "auth" and the key is "username",
+ * the unique check will be case insensitive.
+ *
+ * NB! Array values (eg. from multiple select fields) are matched
+ * as serialized json strings (eg. `["a","b"]`), so the value uniqueness
+ * depends on the order of the elements. In other words, the following values
+ * are considered different: `[]string{"a","b"}` and `[]string{"b","a"}`
*/
- onBeforeBootstrap(): (hook.Hook)
+ isRecordValueUnique(collectionNameOrId: string, key: string, value: any, ...excludeIds: string[]): boolean
+ }
+ interface Dao {
/**
- * OnAfterBootstrap hook is triggered after initializing the main
- * application resources (eg. after db open and initial settings load).
+ * FindAuthRecordByToken finds the auth record associated with the provided JWT.
+ *
+ * Returns an error if the JWT is invalid, expired or not associated to an auth collection record.
*/
- onAfterBootstrap(): (hook.Hook)
+ findAuthRecordByToken(token: string, baseTokenKey: string): (models.Record)
+ }
+ interface Dao {
/**
- * OnBeforeServe hook is triggered before serving the internal router (echo),
- * allowing you to adjust its options and attach new routes or middlewares.
+ * FindAuthRecordByEmail finds the auth record associated with the provided email.
+ *
+ * Returns an error if it is not an auth collection or the record is not found.
*/
- onBeforeServe(): (hook.Hook)
+ findAuthRecordByEmail(collectionNameOrId: string, email: string): (models.Record)
+ }
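+ /**
+ * Illustrative sketch (not part of the generated declarations; the `$app`
+ * global and the "users" auth collection are assumptions of this example):
+ *
+ * ```
+ * const user = $app.dao().findAuthRecordByEmail("users", "test@example.com")
+ * ```
+ */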
+ interface Dao {
/**
- * OnBeforeApiError hook is triggered right before sending an error API
- * response to the client, allowing you to further modify the error data
- * or to return a completely different API response.
+ * FindAuthRecordByUsername finds the auth record associated with the provided username (case insensitive).
+ *
+ * Returns an error if it is not an auth collection or the record is not found.
*/
- onBeforeApiError(): (hook.Hook)
+ findAuthRecordByUsername(collectionNameOrId: string, username: string): (models.Record)
+ }
+ interface Dao {
/**
- * OnAfterApiError hook is triggered right after sending an error API
- * response to the client.
- * It could be used to log the final API error in external services.
+ * SuggestUniqueAuthRecordUsername checks if the provided username is unique
+ * and returns a new "unique" username with an appended random numeric part
+ * (eg. "existingName" -> "existingName583").
+ *
+ * The same username will be returned if the provided string is already unique.
*/
- onAfterApiError(): (hook.Hook)
+ suggestUniqueAuthRecordUsername(collectionNameOrId: string, baseUsername: string, ...excludeIds: string[]): string
+ }
+ interface Dao {
/**
- * OnTerminate hook is triggered when the app is in the process
- * of being terminated (eg. on SIGTERM signal).
+ * CanAccessRecord checks if a record is allowed to be accessed by the
+ * specified requestInfo and accessRule.
+ *
+ * Rule and db checks are ignored in case requestInfo.Admin is set.
+ *
+ * The returned error indicates that something unexpected happened during
+ * the check (eg. invalid rule or db error).
+ *
+ * The method always returns false on invalid access rule or db error.
+ *
+ * Example:
+ *
+ * ```
+ * requestInfo := apis.RequestInfo(c /* echo.Context *\/)
+ * record, _ := dao.FindRecordById("example", "RECORD_ID")
+ * rule := types.Pointer("@request.auth.id != '' || status = 'public'")
+ * // ... or use one of the record collection's rule, eg. record.Collection().ViewRule
+ *
+ * if ok, _ := dao.CanAccessRecord(record, requestInfo, rule); ok { ... }
+ * ```
*/
- onTerminate(): (hook.Hook)
+ canAccessRecord(record: models.Record, requestInfo: models.RequestInfo, accessRule: string): boolean
+ }
+ interface Dao {
/**
- * OnModelBeforeCreate hook is triggered before inserting a new
- * model in the DB, allowing you to modify or validate the stored data.
+ * SaveRecord persists the provided Record model in the database.
*
- * If the optional "tags" list (table names and/or the Collection id for Record models)
- * is specified, then all event handlers registered via the created hook
- * will be triggered and called only if their event data origin matches the tags.
+ * If record.IsNew() is true, the method will perform a create, otherwise an update.
+ * To explicitly mark a record for update you can use record.MarkAsNotNew().
*/
- onModelBeforeCreate(...tags: string[]): (hook.TaggedHook)
+ saveRecord(record: models.Record): void
+ }
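+ /**
+ * Illustrative sketch (not part of the generated declarations): creating and
+ * persisting a new record. The `$app` global, the "posts" collection and the
+ * `new Record(...)` constructor are assumptions of this example.
+ *
+ * ```
+ * const collection = $app.dao().findCollectionByNameOrId("posts")
+ *
+ * const record = new Record(collection, { "title": "Lorem ipsum" })
+ *
+ * // performs a create since the record is new
+ * $app.dao().saveRecord(record)
+ * ```
+ */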
+ interface Dao {
/**
- * OnModelAfterCreate hook is triggered after successfully
- * inserting a new model in the DB.
+ * DeleteRecord deletes the provided Record model.
*
- * If the optional "tags" list (table names and/or the Collection id for Record models)
- * is specified, then all event handlers registered via the created hook
- * will be triggered and called only if their event data origin matches the tags.
+ * This method will also cascade the delete operation to all linked
+ * relational records (delete or unset, depending on the rel settings).
+ *
+ * The delete operation may fail if the record is part of a required
+ * reference in another record (aka. cannot be deleted or unset).
*/
- onModelAfterCreate(...tags: string[]): (hook.TaggedHook)
+ deleteRecord(record: models.Record): void
+ }
+ interface Dao {
/**
- * OnModelBeforeUpdate hook is triggered before updating existing
- * model in the DB, allowing you to modify or validate the stored data.
+ * ExpandRecord expands the relations of a single Record model.
*
- * If the optional "tags" list (table names and/or the Collection id for Record models)
- * is specified, then all event handlers registered via the created hook
- * will be triggered and called only if their event data origin matches the tags.
+ * If optFetchFunc is not set, then a default function will be used
+ * that returns all relation records.
+ *
+ * Returns a map with the failed expand parameters and their errors.
*/
- onModelBeforeUpdate(...tags: string[]): (hook.TaggedHook)
+ expandRecord(record: models.Record, expands: Array<string>, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ }
+ interface Dao {
/**
- * OnModelAfterUpdate hook is triggered after successfully updating
- * existing model in the DB.
+ * ExpandRecords expands the relations of the provided Record models list.
*
- * If the optional "tags" list (table names and/or the Collection id for Record models)
- * is specified, then all event handlers registered via the created hook
- * will be triggered and called only if their event data origin matches the tags.
+ * If optFetchFunc is not set, then a default function will be used
+ * that returns all relation records.
+ *
+ * Returns a map with the failed expand parameters and their errors.
*/
- onModelAfterUpdate(...tags: string[]): (hook.TaggedHook)
+ expandRecords(records: Array<(models.Record | undefined)>, expands: Array<string>, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ }
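+ /**
+ * Illustrative sketch (not part of the generated declarations; the `$app`
+ * global, the "posts" collection and its "author" relation field are
+ * assumptions of this example):
+ *
+ * ```
+ * const record = $app.dao().findRecordById("posts", "RECORD_ID")
+ *
+ * // null falls back to the default fetch function;
+ * // the returned dict holds the failed expands (if any)
+ * const failedExpands = $app.dao().expandRecord(record, ["author"], null)
+ * ```
+ */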
+ // @ts-ignore
+ import validation = ozzo_validation
+ interface Dao {
/**
- * OnModelBeforeDelete hook is triggered before deleting an
- * existing model from the DB.
+ * SyncRecordTableSchema compares the two provided collections
+ * and applies the necessary related record table changes.
*
- * If the optional "tags" list (table names and/or the Collection id for Record models)
- * is specified, then all event handlers registered via the created hook
- * will be triggered and called only if their event data origin matches the tags.
+ * If `oldCollection` is null, then only `newCollection` is used to create the record table.
*/
- onModelBeforeDelete(...tags: string[]): (hook.TaggedHook)
+ syncRecordTableSchema(newCollection: models.Collection, oldCollection: models.Collection): void
+ }
+ interface Dao {
/**
- * OnModelAfterDelete hook is triggered after successfully deleting an
- * existing model from the DB.
+ * FindSettings returns and decodes the serialized app settings param value.
*
- * If the optional "tags" list (table names and/or the Collection id for Record models)
- * is specified, then all event handlers registered via the created hook
- * will be triggered and called only if their event data origin matches the tags.
+ * The method will first try to decode the param value without decryption.
+ * If it fails and optEncryptionKey is set, it will try again by first
+ * decrypting the value and then decoding it.
+ *
+ * Returns an error if it fails to decode the stored serialized param value.
*/
- onModelAfterDelete(...tags: string[]): (hook.TaggedHook)
+ findSettings(...optEncryptionKey: string[]): (settings.Settings)
+ }
+ interface Dao {
/**
- * OnMailerBeforeAdminResetPasswordSend hook is triggered right
- * before sending a password reset email to an admin, allowing you
- * to inspect and customize the email message that is being sent.
+ * SaveSettings persists the specified settings configuration.
+ *
+ * If optEncryptionKey is set, then the stored serialized value will be encrypted with it.
*/
- onMailerBeforeAdminResetPasswordSend(): (hook.Hook)
+ saveSettings(newSettings: settings.Settings, ...optEncryptionKey: string[]): void
+ }
+ interface Dao {
/**
- * OnMailerAfterAdminResetPasswordSend hook is triggered after
- * admin password reset email was successfully sent.
+ * HasTable checks if a table (or view) with the provided name exists (case insensitive).
*/
- onMailerAfterAdminResetPasswordSend(): (hook.Hook)
+ hasTable(tableName: string): boolean
+ }
+ interface Dao {
/**
- * OnMailerBeforeRecordResetPasswordSend hook is triggered right
- * before sending a password reset email to an auth record, allowing
- * you to inspect and customize the email message that is being sent.
- *
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
+ * TableColumns returns all column names of a single table by its name.
*/
- onMailerBeforeRecordResetPasswordSend(...tags: string[]): (hook.TaggedHook)
+ tableColumns(tableName: string): Array<string>
+ }
+ interface Dao {
/**
- * OnMailerAfterRecordResetPasswordSend hook is triggered after
- * an auth record password reset email was successfully sent.
- *
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
+ * TableInfo returns the `table_info` pragma result for the specified table.
*/
- onMailerAfterRecordResetPasswordSend(...tags: string[]): (hook.TaggedHook)
+ tableInfo(tableName: string): Array<(models.TableInfoRow | undefined)>
+ }
+ interface Dao {
/**
- * OnMailerBeforeRecordVerificationSend hook is triggered right
- * before sending a verification email to an auth record, allowing
- * you to inspect and customize the email message that is being sent.
+ * TableIndexes returns a name grouped map with all non empty indexes of the specified table.
*
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
+ * Note: This method doesn't return an error on a nonexisting table.
*/
- onMailerBeforeRecordVerificationSend(...tags: string[]): (hook.TaggedHook)
+ tableIndexes(tableName: string): _TygojaDict
+ }
+ interface Dao {
/**
- * OnMailerAfterRecordVerificationSend hook is triggered after a
- * verification email was successfully sent to an auth record.
+ * DeleteTable drops the specified table.
*
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
- */
- onMailerAfterRecordVerificationSend(...tags: string[]): (hook.TaggedHook)
- /**
- * OnMailerBeforeRecordChangeEmailSend hook is triggered right before
- * sending a confirmation new address email to an auth record, allowing
- * you to inspect and customize the email message that is being sent.
+ * This method is a no-op if a table with the provided name doesn't exist.
*
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
+ * Be aware that this method is vulnerable to SQL injection and the
+ * "tableName" argument must come only from trusted input!
*/
- onMailerBeforeRecordChangeEmailSend(...tags: string[]): (hook.TaggedHook)
+ deleteTable(tableName: string): void
+ }
+ interface Dao {
/**
- * OnMailerAfterRecordChangeEmailSend hook is triggered after a
- * verification email was successfully sent to an auth record.
- *
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
+ * Vacuum executes VACUUM on the current dao.DB() instance in order to
+ * reclaim unused db disk space.
*/
- onMailerAfterRecordChangeEmailSend(...tags: string[]): (hook.TaggedHook)
+ vacuum(): void
+ }
+ interface Dao {
/**
- * OnRealtimeConnectRequest hook is triggered right before establishing
- * the SSE client connection.
+ * DeleteView drops the specified view name.
+ *
+ * This method is a no-op if a view with the provided name doesn't exist.
+ *
+ * Be aware that this method is vulnerable to SQL injection and the
+ * "name" argument must come only from trusted input!
*/
- onRealtimeConnectRequest(): (hook.Hook)
+ deleteView(name: string): void
+ }
+ interface Dao {
/**
- * OnRealtimeDisconnectRequest hook is triggered on disconnected/interrupted
- * SSE client connection.
+ * SaveView creates (or updates already existing) persistent SQL view.
+ *
+ * Be aware that this method is vulnerable to SQL injection and the
+ * "selectQuery" argument must come only from trusted input!
*/
- onRealtimeDisconnectRequest(): (hook.Hook)
+ saveView(name: string, selectQuery: string): void
+ }
+ interface Dao {
/**
- * OnRealtimeBeforeMessageSend hook is triggered right before sending
- * an SSE message to a client.
+ * CreateViewSchema creates a new view schema from the provided select query.
*
- * Returning [hook.StopPropagation] will prevent sending the message.
- * Returning any other non-nil error will close the realtime connection.
+ * There are some caveats:
+ * - The select query must have an "id" column.
+ * - Wildcard ("*") columns are not supported to avoid accidentally leaking sensitive data.
*/
- onRealtimeBeforeMessageSend(): (hook.Hook)
+ createViewSchema(selectQuery: string): schema.Schema
+ }
+ interface Dao {
/**
- * OnRealtimeAfterMessageSend hook is triggered right after sending
- * an SSE message to a client.
+ * FindRecordByViewFile returns the original models.Record of the
+ * provided view collection file.
*/
- onRealtimeAfterMessageSend(): (hook.Hook)
+ findRecordByViewFile(viewCollectionNameOrId: string, fileFieldName: string, filename: string): (models.Record)
+ }
+}
+
+/**
+ * Package core is the backbone of PocketBase.
+ *
+ * It defines the main PocketBase App interface and its base implementation.
+ */
+namespace core {
+ /**
+ * App defines the main PocketBase app interface.
+ */
+ interface App {
+ [key:string]: any;
/**
- * OnRealtimeBeforeSubscribeRequest hook is triggered before changing
- * the client subscriptions, allowing you to further validate and
- * modify the submitted change.
+ * Deprecated:
+ * This method may get removed in the near future.
+ * It is recommended to access the app db instance from app.Dao().DB() or
+ * if you want more flexibility - app.Dao().ConcurrentDB() and app.Dao().NonconcurrentDB().
+ *
+ * DB returns the default app database instance.
*/
- onRealtimeBeforeSubscribeRequest(): (hook.Hook)
+ db(): (dbx.DB)
/**
- * OnRealtimeAfterSubscribeRequest hook is triggered after the client
- * subscriptions were successfully changed.
+ * Dao returns the default app Dao instance.
+ *
+ * This Dao can operate only on the tables and models
+ * associated with the default app database. For example,
+ * trying to access the request logs table will result in an error.
*/
- onRealtimeAfterSubscribeRequest(): (hook.Hook)
+ dao(): (daos.Dao)
/**
- * OnSettingsListRequest hook is triggered on each successful
- * API Settings list request.
+ * Deprecated:
+ * This method may get removed in the near future.
+ * It is recommended to access the logs db instance from app.LogsDao().DB() or
+ * if you want more flexibility - app.LogsDao().ConcurrentDB() and app.LogsDao().NonconcurrentDB().
*
- * Could be used to validate or modify the response before
- * returning it to the client.
+ * LogsDB returns the app logs database instance.
*/
- onSettingsListRequest(): (hook.Hook)
+ logsDB(): (dbx.DB)
/**
- * OnSettingsBeforeUpdateRequest hook is triggered before each API
- * Settings update request (after request data load and before settings persistence).
+ * LogsDao returns the app logs Dao instance.
*
- * Could be used to additionally validate the request data or
- * implement completely different persistence behavior.
+ * This Dao can operate only on the tables and models
+ * associated with the logs database. For example, trying to access
+ * the users table from LogsDao will result in an error.
*/
- onSettingsBeforeUpdateRequest(): (hook.Hook)
+ logsDao(): (daos.Dao)
/**
- * OnSettingsAfterUpdateRequest hook is triggered after each
- * successful API Settings update request.
+ * Logger returns the active app logger.
*/
- onSettingsAfterUpdateRequest(): (hook.Hook)
+ logger(): (slog.Logger)
/**
- * OnFileDownloadRequest hook is triggered before each API File download request.
- *
- * Could be used to validate or modify the file response before
- * returning it to the client.
+ * DataDir returns the app data directory path.
*/
- onFileDownloadRequest(...tags: string[]): (hook.TaggedHook)
+ dataDir(): string
/**
- * OnFileBeforeTokenRequest hook is triggered before each file
- * token API request.
- *
- * If no token or model was submitted, e.Model and e.Token will be empty,
- * allowing you to implement your own custom model file auth implementation.
- *
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
+ * EncryptionEnv returns the name of the app secret env key
+ * (used for settings encryption).
*/
- onFileBeforeTokenRequest(...tags: string[]): (hook.TaggedHook)
+ encryptionEnv(): string
/**
- * OnFileAfterTokenRequest hook is triggered after each
- * successful file token API request.
- *
- * If the optional "tags" list (Collection ids or names) is specified,
- * then all event handlers registered via the created hook will be
- * triggered and called only if their event data origin matches the tags.
+ * IsDev returns whether the app is in dev mode.
*/
- onFileAfterTokenRequest(...tags: string[]): (hook.TaggedHook)
+ isDev(): boolean
/**
- * OnAdminsListRequest hook is triggered on each API Admins list request.
- *
- * Could be used to validate or modify the response before returning it to the client.
+ * Settings returns the loaded app settings.
*/
- onAdminsListRequest(): (hook.Hook)
+ settings(): (settings.Settings)
/**
- * OnAdminViewRequest hook is triggered on each API Admin view request.
- *
- * Could be used to validate or modify the response before returning it to the client.
+ * Deprecated: Use app.Store() instead.
*/
- onAdminViewRequest(): (hook.Hook