diff --git a/backend/.sqlx/query-3317484a9c09c07c2c9db9debaecc4a4d518093ab48e79365dbb808068e0b8ff.json b/backend/.sqlx/query-0f5a31f328e59befb7dd3c3cb44439a0405d479e02ac79c2f4ec9a97636bd80d.json similarity index 54% rename from backend/.sqlx/query-3317484a9c09c07c2c9db9debaecc4a4d518093ab48e79365dbb808068e0b8ff.json rename to backend/.sqlx/query-0f5a31f328e59befb7dd3c3cb44439a0405d479e02ac79c2f4ec9a97636bd80d.json index edabfc0be8971..734f76ba0af9b 100644 --- a/backend/.sqlx/query-3317484a9c09c07c2c9db9debaecc4a4d518093ab48e79365dbb808068e0b8ff.json +++ b/backend/.sqlx/query-0f5a31f328e59befb7dd3c3cb44439a0405d479e02ac79c2f4ec9a97636bd80d.json @@ -1,17 +1,16 @@ { "db_name": "PostgreSQL", - "query": "DELETE FROM variable WHERE path = $1 AND workspace_id = $2 RETURNING path", + "query": "SELECT token_hash FROM token WHERE token_hash = $1", "describe": { "columns": [ { "ordinal": 0, - "name": "path", + "name": "token_hash", "type_info": "Varchar" } ], "parameters": { "Left": [ - "Text", "Text" ] }, @@ -19,5 +18,5 @@ false ] }, - "hash": "3317484a9c09c07c2c9db9debaecc4a4d518093ab48e79365dbb808068e0b8ff" + "hash": "0f5a31f328e59befb7dd3c3cb44439a0405d479e02ac79c2f4ec9a97636bd80d" } diff --git a/backend/.sqlx/query-bb446cbb20166f274a7ee6e88abaa27e233e60e18b3d35545005eb680701241f.json b/backend/.sqlx/query-104fc7e5433abd7247323c5ef76b85f937776a6b47cd99c648bb4d819d3cfe57.json similarity index 75% rename from backend/.sqlx/query-bb446cbb20166f274a7ee6e88abaa27e233e60e18b3d35545005eb680701241f.json rename to backend/.sqlx/query-104fc7e5433abd7247323c5ef76b85f937776a6b47cd99c648bb4d819d3cfe57.json index 9085383617bfb..a59afbf3ef49e 100644 --- a/backend/.sqlx/query-bb446cbb20166f274a7ee6e88abaa27e233e60e18b3d35545005eb680701241f.json +++ b/backend/.sqlx/query-104fc7e5433abd7247323c5ef76b85f937776a6b47cd99c648bb4d819d3cfe57.json @@ -1,12 +1,12 @@ { "db_name": "PostgreSQL", - "query": "DELETE FROM token WHERE expiration <= now()\n RETURNING substring(token for 10) as 
token_prefix, label, email, workspace_id", + "query": "DELETE FROM token WHERE expiration <= now()\n RETURNING token_prefix, label, email, workspace_id", "describe": { "columns": [ { "ordinal": 0, "name": "token_prefix", - "type_info": "Text" + "type_info": "Varchar" }, { "ordinal": 1, @@ -28,11 +28,11 @@ "Left": [] }, "nullable": [ - null, + false, true, true, true ] }, - "hash": "bb446cbb20166f274a7ee6e88abaa27e233e60e18b3d35545005eb680701241f" + "hash": "104fc7e5433abd7247323c5ef76b85f937776a6b47cd99c648bb4d819d3cfe57" } diff --git a/backend/.sqlx/query-15ef5759a2ccd7b7f9fd3f2ce0d54d01fe0a2c7e9692ac4ce29a86eb509e1a1d.json b/backend/.sqlx/query-15ef5759a2ccd7b7f9fd3f2ce0d54d01fe0a2c7e9692ac4ce29a86eb509e1a1d.json deleted file mode 100644 index 5552400f28f87..0000000000000 --- a/backend/.sqlx/query-15ef5759a2ccd7b7f9fd3f2ce0d54d01fe0a2c7e9692ac4ce29a86eb509e1a1d.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO token\n (token, label, super_admin, email)\n VALUES ($1, $2, $3, $4)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Varchar", - "Varchar", - "Bool", - "Varchar" - ] - }, - "nullable": [] - }, - "hash": "15ef5759a2ccd7b7f9fd3f2ce0d54d01fe0a2c7e9692ac4ce29a86eb509e1a1d" -} diff --git a/backend/.sqlx/query-1a2470da1015634d15952819f482749ef04e1a8c944c0fb7696e387d10370217.json b/backend/.sqlx/query-1a2470da1015634d15952819f482749ef04e1a8c944c0fb7696e387d10370217.json new file mode 100644 index 0000000000000..85eaed8eda186 --- /dev/null +++ b/backend/.sqlx/query-1a2470da1015634d15952819f482749ef04e1a8c944c0fb7696e387d10370217.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token (token_hash, token_prefix, token, email, label, super_admin)\n VALUES ($1, $2, $3, 'test@windmill.dev', 'webhook-test', false)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": 
"1a2470da1015634d15952819f482749ef04e1a8c944c0fb7696e387d10370217" +} diff --git a/backend/.sqlx/query-ee537def1ead8bee48bb9f5c1f57d42e7add6011c34d91761ba23e2c74c4032c.json b/backend/.sqlx/query-1a69ef11a3f361f105c2a8af7b7fa182f3953150ade1756259b31a50e9308fce.json similarity index 80% rename from backend/.sqlx/query-ee537def1ead8bee48bb9f5c1f57d42e7add6011c34d91761ba23e2c74c4032c.json rename to backend/.sqlx/query-1a69ef11a3f361f105c2a8af7b7fa182f3953150ade1756259b31a50e9308fce.json index 536175599d0f3..492fffe8be2ff 100644 --- a/backend/.sqlx/query-ee537def1ead8bee48bb9f5c1f57d42e7add6011c34d91761ba23e2c74c4032c.json +++ b/backend/.sqlx/query-1a69ef11a3f361f105c2a8af7b7fa182f3953150ade1756259b31a50e9308fce.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n external_id,\n workspace_id,\n service_name AS \"service_name!: ServiceName\",\n script_path,\n is_flow,\n webhook_token_prefix,\n service_config,\n error,\n created_at,\n updated_at\n FROM\n native_trigger\n WHERE\n workspace_id = $1\n AND service_name = $2\n AND script_path = $3\n AND is_flow = $4\n LIMIT 1\n ", + "query": "\n SELECT\n external_id,\n workspace_id,\n service_name AS \"service_name!: ServiceName\",\n script_path,\n is_flow,\n webhook_token_hash,\n service_config,\n error,\n created_at,\n updated_at\n FROM\n native_trigger\n WHERE\n workspace_id = $1\n AND service_name = $2\n AND script_path = $3\n AND is_flow = $4\n LIMIT 1\n ", "describe": { "columns": [ { @@ -40,7 +40,7 @@ }, { "ordinal": 5, - "name": "webhook_token_prefix", + "name": "webhook_token_hash", "type_info": "Varchar" }, { @@ -95,5 +95,5 @@ false ] }, - "hash": "ee537def1ead8bee48bb9f5c1f57d42e7add6011c34d91761ba23e2c74c4032c" + "hash": "1a69ef11a3f361f105c2a8af7b7fa182f3953150ade1756259b31a50e9308fce" } diff --git a/backend/.sqlx/query-8be2919c3511575c89b882b112b987fd5724c299cb285f819a2561260404e513.json b/backend/.sqlx/query-1bf4a93cb85c6eed313a2f393da9408dd2aa4e47ef7a38a0d3ccca944a09f5bb.json similarity index 
68% rename from backend/.sqlx/query-8be2919c3511575c89b882b112b987fd5724c299cb285f819a2561260404e513.json rename to backend/.sqlx/query-1bf4a93cb85c6eed313a2f393da9408dd2aa4e47ef7a38a0d3ccca944a09f5bb.json index 191b010aec724..ae055e1b5fa92 100644 --- a/backend/.sqlx/query-8be2919c3511575c89b882b112b987fd5724c299cb285f819a2561260404e513.json +++ b/backend/.sqlx/query-1bf4a93cb85c6eed313a2f393da9408dd2aa4e47ef7a38a0d3ccca944a09f5bb.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT label, concat(substring(token for 10)) as token_prefix, expiration, created_at, last_used_at, scopes FROM token WHERE email = $1 AND (label != 'ephemeral-script' OR label IS NULL)\n ORDER BY created_at DESC LIMIT $2 OFFSET $3", + "query": "SELECT label, token_prefix, expiration, created_at, last_used_at, scopes FROM token WHERE email = $1 AND (label != 'ephemeral-script' OR label IS NULL)\n ORDER BY created_at DESC LIMIT $2 OFFSET $3", "describe": { "columns": [ { @@ -11,7 +11,7 @@ { "ordinal": 1, "name": "token_prefix", - "type_info": "Text" + "type_info": "Varchar" }, { "ordinal": 2, @@ -43,12 +43,12 @@ }, "nullable": [ true, - null, + false, true, false, false, true ] }, - "hash": "8be2919c3511575c89b882b112b987fd5724c299cb285f819a2561260404e513" + "hash": "1bf4a93cb85c6eed313a2f393da9408dd2aa4e47ef7a38a0d3ccca944a09f5bb" } diff --git a/backend/.sqlx/query-6a254de9005594dc75a59a545546417c8a5aa7635be1dc0b37dc29d0f9e7c163.json b/backend/.sqlx/query-207106aa8267fe756989f3ee1eadb7e169d07463f67f1da79c8bc23c1079c185.json similarity index 61% rename from backend/.sqlx/query-6a254de9005594dc75a59a545546417c8a5aa7635be1dc0b37dc29d0f9e7c163.json rename to backend/.sqlx/query-207106aa8267fe756989f3ee1eadb7e169d07463f67f1da79c8bc23c1079c185.json index 3e7972dbd7f0b..e1cdb6416db39 100644 --- a/backend/.sqlx/query-6a254de9005594dc75a59a545546417c8a5aa7635be1dc0b37dc29d0f9e7c163.json +++ b/backend/.sqlx/query-207106aa8267fe756989f3ee1eadb7e169d07463f67f1da79c8bc23c1079c185.json @@ 
-1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT label,\n concat(substring(token for 10)) AS token_prefix,\n expiration,\n created_at,\n last_used_at,\n scopes,\n email\n FROM token\n WHERE workspace_id = $1\n AND (\n scopes @> ARRAY['jobs:run:flows:' || $2]::text[]\n OR scopes @> ARRAY['run:flow/' || $2]::text[]\n )\n ", + "query": "\n SELECT label,\n token_prefix,\n expiration,\n created_at,\n last_used_at,\n scopes,\n email\n FROM token\n WHERE workspace_id = $1\n AND (\n scopes @> ARRAY['jobs:run:scripts:' || $2]::text[]\n OR scopes @> ARRAY['run:script/' || $2]::text[]\n )\n ", "describe": { "columns": [ { @@ -11,7 +11,7 @@ { "ordinal": 1, "name": "token_prefix", - "type_info": "Text" + "type_info": "Varchar" }, { "ordinal": 2, @@ -47,7 +47,7 @@ }, "nullable": [ true, - null, + false, true, false, false, @@ -55,5 +55,5 @@ true ] }, - "hash": "6a254de9005594dc75a59a545546417c8a5aa7635be1dc0b37dc29d0f9e7c163" + "hash": "207106aa8267fe756989f3ee1eadb7e169d07463f67f1da79c8bc23c1079c185" } diff --git a/backend/.sqlx/query-1bdf186d3b99bbd913cbf95150105470cd5f1d4ddbb147cb8ce46f9d1da5dfaf.json b/backend/.sqlx/query-215163b5a2791c51f9b28681c1ca1a47475dcf1a388c613a9e0154aef6582a23.json similarity index 55% rename from backend/.sqlx/query-1bdf186d3b99bbd913cbf95150105470cd5f1d4ddbb147cb8ce46f9d1da5dfaf.json rename to backend/.sqlx/query-215163b5a2791c51f9b28681c1ca1a47475dcf1a388c613a9e0154aef6582a23.json index b58e3bf8d81b9..54a7a25b68bb5 100644 --- a/backend/.sqlx/query-1bdf186d3b99bbd913cbf95150105470cd5f1d4ddbb147cb8ce46f9d1da5dfaf.json +++ b/backend/.sqlx/query-215163b5a2791c51f9b28681c1ca1a47475dcf1a388c613a9e0154aef6582a23.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "WITH email_lookup AS (\n SELECT email FROM token WHERE token = $1\n )\n DELETE FROM token\n WHERE email = (SELECT email FROM email_lookup) AND label = 'session'\n RETURNING email", + "query": "WITH email_lookup AS (\n SELECT email FROM token WHERE token_hash = $1\n )\n 
DELETE FROM token\n WHERE email = (SELECT email FROM email_lookup) AND label = 'session'\n RETURNING email", "describe": { "columns": [ { @@ -18,5 +18,5 @@ true ] }, - "hash": "1bdf186d3b99bbd913cbf95150105470cd5f1d4ddbb147cb8ce46f9d1da5dfaf" + "hash": "215163b5a2791c51f9b28681c1ca1a47475dcf1a388c613a9e0154aef6582a23" } diff --git a/backend/.sqlx/query-223fbd972728d5b3ec5b1708e3f2e1f4901b0382fca50704c9544cdec5f9352c.json b/backend/.sqlx/query-223fbd972728d5b3ec5b1708e3f2e1f4901b0382fca50704c9544cdec5f9352c.json new file mode 100644 index 0000000000000..8595b12ec6e33 --- /dev/null +++ b/backend/.sqlx/query-223fbd972728d5b3ec5b1708e3f2e1f4901b0382fca50704c9544cdec5f9352c.json @@ -0,0 +1,21 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token (token_hash, token_prefix, token, email, label, expiration, scopes, workspace_id)\n SELECT $1::varchar, $2::varchar, $3::varchar, $4::varchar, $5::varchar, now() + ($6 || ' seconds')::interval, $7::text[], $8::varchar\n WHERE NOT EXISTS(SELECT 1 FROM workspace WHERE id = $8 AND deleted = true)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Text", + "TextArray", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "223fbd972728d5b3ec5b1708e3f2e1f4901b0382fca50704c9544cdec5f9352c" +} diff --git a/backend/.sqlx/query-27cafd840e5f2c85d1c1e02d84a1b372e9d40dee29a10fb8fec89492fc501556.json b/backend/.sqlx/query-27cafd840e5f2c85d1c1e02d84a1b372e9d40dee29a10fb8fec89492fc501556.json new file mode 100644 index 0000000000000..f2acda445da42 --- /dev/null +++ b/backend/.sqlx/query-27cafd840e5f2c85d1c1e02d84a1b372e9d40dee29a10fb8fec89492fc501556.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT encode(sha256('SECRET_TOKEN'::bytea), 'hex') AS hash", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hash", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": 
"27cafd840e5f2c85d1c1e02d84a1b372e9d40dee29a10fb8fec89492fc501556" +} diff --git a/backend/.sqlx/query-406bcbf55758b10243c8eaff1c349b8082c0052d626bf67e08317e56ab9ad026.json b/backend/.sqlx/query-406bcbf55758b10243c8eaff1c349b8082c0052d626bf67e08317e56ab9ad026.json new file mode 100644 index 0000000000000..2b5b68dfaeca9 --- /dev/null +++ b/backend/.sqlx/query-406bcbf55758b10243c8eaff1c349b8082c0052d626bf67e08317e56ab9ad026.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT label, email, scopes, workspace_id, super_admin, owner, expiration FROM token WHERE token_hash = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "label", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "scopes", + "type_info": "TextArray" + }, + { + "ordinal": 3, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "super_admin", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "owner", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "expiration", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + false, + true, + true + ] + }, + "hash": "406bcbf55758b10243c8eaff1c349b8082c0052d626bf67e08317e56ab9ad026" +} diff --git a/backend/.sqlx/query-27ada97cb533c8595f1d73987c7823d8e54c96889e06895c57cafae9ca27bf8b.json b/backend/.sqlx/query-40a8bf6a5a42c275d73221bc5f386f2e18cb911352551d0a34bf1933e558674e.json similarity index 63% rename from backend/.sqlx/query-27ada97cb533c8595f1d73987c7823d8e54c96889e06895c57cafae9ca27bf8b.json rename to backend/.sqlx/query-40a8bf6a5a42c275d73221bc5f386f2e18cb911352551d0a34bf1933e558674e.json index 1199e9441ff02..706fa0c9ead6d 100644 --- a/backend/.sqlx/query-27ada97cb533c8595f1d73987c7823d8e54c96889e06895c57cafae9ca27bf8b.json +++ b/backend/.sqlx/query-40a8bf6a5a42c275d73221bc5f386f2e18cb911352551d0a34bf1933e558674e.json 
@@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE native_trigger\n SET script_path = $1, is_flow = $2, webhook_token_prefix = $3, service_config = $4, error = NULL, updated_at = NOW()\n WHERE\n workspace_id = $5\n AND service_name = $6\n AND external_id = $7\n ", + "query": "\n UPDATE native_trigger\n SET script_path = $1, is_flow = $2, webhook_token_hash = $3, service_config = $4, error = NULL, updated_at = NOW()\n WHERE\n workspace_id = $5\n AND service_name = $6\n AND external_id = $7\n ", "describe": { "columns": [], "parameters": { @@ -26,5 +26,5 @@ }, "nullable": [] }, - "hash": "27ada97cb533c8595f1d73987c7823d8e54c96889e06895c57cafae9ca27bf8b" + "hash": "40a8bf6a5a42c275d73221bc5f386f2e18cb911352551d0a34bf1933e558674e" } diff --git a/backend/.sqlx/query-66e0968fe9f757755945a7010153821cf73ace9d6692750ccc4cca37701ed77a.json b/backend/.sqlx/query-4b76c4a387786bc5bb69e4c684c34b936c3ffff44ae58f0709d05ba3ff534f79.json similarity index 54% rename from backend/.sqlx/query-66e0968fe9f757755945a7010153821cf73ace9d6692750ccc4cca37701ed77a.json rename to backend/.sqlx/query-4b76c4a387786bc5bb69e4c684c34b936c3ffff44ae58f0709d05ba3ff534f79.json index 3d63bbcfbf590..8fc87ce587310 100644 --- a/backend/.sqlx/query-66e0968fe9f757755945a7010153821cf73ace9d6692750ccc4cca37701ed77a.json +++ b/backend/.sqlx/query-4b76c4a387786bc5bb69e4c684c34b936c3ffff44ae58f0709d05ba3ff534f79.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "DELETE FROM token WHERE token = $1", + "query": "DELETE FROM token WHERE token_hash = $1", "describe": { "columns": [], "parameters": { @@ -10,5 +10,5 @@ }, "nullable": [] }, - "hash": "66e0968fe9f757755945a7010153821cf73ace9d6692750ccc4cca37701ed77a" + "hash": "4b76c4a387786bc5bb69e4c684c34b936c3ffff44ae58f0709d05ba3ff534f79" } diff --git a/backend/.sqlx/query-4bf2f3c6771ab4a15b94ba713ebaab2b35961f750600500e3736edcff1c191fe.json b/backend/.sqlx/query-4bf2f3c6771ab4a15b94ba713ebaab2b35961f750600500e3736edcff1c191fe.json new file mode 
100644 index 0000000000000..10c513f2769b5 --- /dev/null +++ b/backend/.sqlx/query-4bf2f3c6771ab4a15b94ba713ebaab2b35961f750600500e3736edcff1c191fe.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT token_hash FROM token WHERE email = 'test@windmill.dev' AND label = 'test token'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "token_hash", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "4bf2f3c6771ab4a15b94ba713ebaab2b35961f750600500e3736edcff1c191fe" +} diff --git a/backend/.sqlx/query-29673d489fbf45fc249da04c1a2fd60e2364ba87263f962ed7d4329c916620a1.json b/backend/.sqlx/query-4c7231f24fd0bcc99004c5bd4065697cd321b397422a7c689b85216bbb1fd525.json similarity index 61% rename from backend/.sqlx/query-29673d489fbf45fc249da04c1a2fd60e2364ba87263f962ed7d4329c916620a1.json rename to backend/.sqlx/query-4c7231f24fd0bcc99004c5bd4065697cd321b397422a7c689b85216bbb1fd525.json index 251fce263710b..5cf5699f2a193 100644 --- a/backend/.sqlx/query-29673d489fbf45fc249da04c1a2fd60e2364ba87263f962ed7d4329c916620a1.json +++ b/backend/.sqlx/query-4c7231f24fd0bcc99004c5bd4065697cd321b397422a7c689b85216bbb1fd525.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT label,\n concat(substring(token for 10)) AS token_prefix,\n expiration,\n created_at,\n last_used_at,\n scopes,\n email\n FROM token\n WHERE workspace_id = $1\n AND (\n scopes @> ARRAY['jobs:run:scripts:' || $2]::text[]\n OR scopes @> ARRAY['run:script/' || $2]::text[]\n )\n ", + "query": "\n SELECT label,\n token_prefix,\n expiration,\n created_at,\n last_used_at,\n scopes,\n email\n FROM token\n WHERE workspace_id = $1\n AND (\n scopes @> ARRAY['jobs:run:flows:' || $2]::text[]\n OR scopes @> ARRAY['run:flow/' || $2]::text[]\n )\n ", "describe": { "columns": [ { @@ -11,7 +11,7 @@ { "ordinal": 1, "name": "token_prefix", - "type_info": "Text" + "type_info": "Varchar" }, { "ordinal": 2, @@ -47,7 +47,7 @@ }, 
"nullable": [ true, - null, + false, true, false, false, @@ -55,5 +55,5 @@ true ] }, - "hash": "29673d489fbf45fc249da04c1a2fd60e2364ba87263f962ed7d4329c916620a1" + "hash": "4c7231f24fd0bcc99004c5bd4065697cd321b397422a7c689b85216bbb1fd525" } diff --git a/backend/.sqlx/query-4e88aec662ebc70e0425a48a1b4e2e60e3183fa81a411622891caea6dc03fa90.json b/backend/.sqlx/query-4e88aec662ebc70e0425a48a1b4e2e60e3183fa81a411622891caea6dc03fa90.json new file mode 100644 index 0000000000000..27d46b27ed081 --- /dev/null +++ b/backend/.sqlx/query-4e88aec662ebc70e0425a48a1b4e2e60e3183fa81a411622891caea6dc03fa90.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token (token_hash, token_prefix, email, label, super_admin, owner, workspace_id)\n VALUES ($1, $2, 'charlie@windmill.dev', 'Charlie new token', false, 'u/charlie', 'test-workspace')", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "4e88aec662ebc70e0425a48a1b4e2e60e3183fa81a411622891caea6dc03fa90" +} diff --git a/backend/.sqlx/query-2d6607b3c38fe72b5663c32de58dacbabed4c5ae28101e3ae2694f96fd055a91.json b/backend/.sqlx/query-52379713a1f7312127bcd13c9a8027a85270c25c5a0f0d4d7670bd602bd3cebf.json similarity index 65% rename from backend/.sqlx/query-2d6607b3c38fe72b5663c32de58dacbabed4c5ae28101e3ae2694f96fd055a91.json rename to backend/.sqlx/query-52379713a1f7312127bcd13c9a8027a85270c25c5a0f0d4d7670bd602bd3cebf.json index da0ce607091e2..50a30bd742e3c 100644 --- a/backend/.sqlx/query-2d6607b3c38fe72b5663c32de58dacbabed4c5ae28101e3ae2694f96fd055a91.json +++ b/backend/.sqlx/query-52379713a1f7312127bcd13c9a8027a85270c25c5a0f0d4d7670bd602bd3cebf.json @@ -1,11 +1,11 @@ { "db_name": "PostgreSQL", - "query": "DELETE FROM token WHERE workspace_id = $1 AND label IS DISTINCT FROM 'session' RETURNING token", + "query": "DELETE FROM token WHERE workspace_id = $1 AND label IS DISTINCT FROM 'session' RETURNING token_prefix", "describe": { 
"columns": [ { "ordinal": 0, - "name": "token", + "name": "token_prefix", "type_info": "Varchar" } ], @@ -18,5 +18,5 @@ false ] }, - "hash": "2d6607b3c38fe72b5663c32de58dacbabed4c5ae28101e3ae2694f96fd055a91" + "hash": "52379713a1f7312127bcd13c9a8027a85270c25c5a0f0d4d7670bd602bd3cebf" } diff --git a/backend/.sqlx/query-54756c6c39888feb2206b056df1c84c3bb44adc490309954359845c06b6e607c.json b/backend/.sqlx/query-54756c6c39888feb2206b056df1c84c3bb44adc490309954359845c06b6e607c.json deleted file mode 100644 index 39355ffc0c618..0000000000000 --- a/backend/.sqlx/query-54756c6c39888feb2206b056df1c84c3bb44adc490309954359845c06b6e607c.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO token\n (token, email, label, expiration, super_admin)\n VALUES ($1, $2, $3, $4, $5)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Varchar", - "Varchar", - "Varchar", - "Timestamptz", - "Bool" - ] - }, - "nullable": [] - }, - "hash": "54756c6c39888feb2206b056df1c84c3bb44adc490309954359845c06b6e607c" -} diff --git a/backend/.sqlx/query-54c0c20fe025d4fb45f04ff3389b25915f671e7c52426fc54b2fd533b90596e2.json b/backend/.sqlx/query-54c0c20fe025d4fb45f04ff3389b25915f671e7c52426fc54b2fd533b90596e2.json new file mode 100644 index 0000000000000..de1b71a1e6a2d --- /dev/null +++ b/backend/.sqlx/query-54c0c20fe025d4fb45f04ff3389b25915f671e7c52426fc54b2fd533b90596e2.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token\n (token_hash, token_prefix, token, email, label, expiration, super_admin)\n VALUES ($1, $2, $3, $4, $5, now() + ($6 || ' seconds')::interval, $7)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Text", + "Bool" + ] + }, + "nullable": [] + }, + "hash": "54c0c20fe025d4fb45f04ff3389b25915f671e7c52426fc54b2fd533b90596e2" +} diff --git a/backend/.sqlx/query-56031289603fbf9c60ff2c04750fa0e94550eb617612c2bba81b9ce150d355b5.json 
b/backend/.sqlx/query-56031289603fbf9c60ff2c04750fa0e94550eb617612c2bba81b9ce150d355b5.json new file mode 100644 index 0000000000000..cc515a4e8fe14 --- /dev/null +++ b/backend/.sqlx/query-56031289603fbf9c60ff2c04750fa0e94550eb617612c2bba81b9ce150d355b5.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT token FROM token WHERE token_hash = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "token", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "56031289603fbf9c60ff2c04750fa0e94550eb617612c2bba81b9ce150d355b5" +} diff --git a/backend/.sqlx/query-58dc872520beaa914fef8b7f30e578261fb9ebd92a81e1f2c8edaf93cece0819.json b/backend/.sqlx/query-58dc872520beaa914fef8b7f30e578261fb9ebd92a81e1f2c8edaf93cece0819.json deleted file mode 100644 index f1346579a2c5f..0000000000000 --- a/backend/.sqlx/query-58dc872520beaa914fef8b7f30e578261fb9ebd92a81e1f2c8edaf93cece0819.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM token\n WHERE token LIKE concat($1::text, '%')\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text" - ] - }, - "nullable": [] - }, - "hash": "58dc872520beaa914fef8b7f30e578261fb9ebd92a81e1f2c8edaf93cece0819" -} diff --git a/backend/.sqlx/query-5c09c2ffb28f6eee3d7e48bd6373c0bcddc0943346f02315b962db3b13590d30.json b/backend/.sqlx/query-5c09c2ffb28f6eee3d7e48bd6373c0bcddc0943346f02315b962db3b13590d30.json new file mode 100644 index 0000000000000..25bbd4f835be9 --- /dev/null +++ b/backend/.sqlx/query-5c09c2ffb28f6eee3d7e48bd6373c0bcddc0943346f02315b962db3b13590d30.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(SELECT 1 FROM token WHERE token_hash = $1) AS exists", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": 
"5c09c2ffb28f6eee3d7e48bd6373c0bcddc0943346f02315b962db3b13590d30" +} diff --git a/backend/.sqlx/query-83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75.json b/backend/.sqlx/query-6c75c89fb215c646f54f2036c40a86a4f4ea8880cc5d0b511aa70b6fd50072c5.json similarity index 71% rename from backend/.sqlx/query-83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75.json rename to backend/.sqlx/query-6c75c89fb215c646f54f2036c40a86a4f4ea8880cc5d0b511aa70b6fd50072c5.json index d6240c6b3ae55..bf734e5861e6e 100644 --- a/backend/.sqlx/query-83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75.json +++ b/backend/.sqlx/query-6c75c89fb215c646f54f2036c40a86a4f4ea8880cc5d0b511aa70b6fd50072c5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT external_id, webhook_token_prefix FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", + "query": "SELECT external_id, webhook_token_hash FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", "describe": { "columns": [ { @@ -10,7 +10,7 @@ }, { "ordinal": 1, - "name": "webhook_token_prefix", + "name": "webhook_token_hash", "type_info": "Varchar" } ], @@ -35,5 +35,5 @@ false ] }, - "hash": "83d6e371ca84903e9f487afc065353a9f7be86ff752612909587ec3cb770cb75" + "hash": "6c75c89fb215c646f54f2036c40a86a4f4ea8880cc5d0b511aa70b6fd50072c5" } diff --git a/backend/.sqlx/query-023cdbc77ea9e2c17a1aa92a5b9001f29e58e81b3f782887db6e0a627dd8ad75.json b/backend/.sqlx/query-6f9386dfcb4c201525722aee3caa25bf2f3a35d90f7354c7d3aef8a3538a03a7.json similarity index 62% rename from backend/.sqlx/query-023cdbc77ea9e2c17a1aa92a5b9001f29e58e81b3f782887db6e0a627dd8ad75.json rename to backend/.sqlx/query-6f9386dfcb4c201525722aee3caa25bf2f3a35d90f7354c7d3aef8a3538a03a7.json index d3f1c39c7a6c6..69af249a3fc41 100644 --- a/backend/.sqlx/query-023cdbc77ea9e2c17a1aa92a5b9001f29e58e81b3f782887db6e0a627dd8ad75.json +++ 
b/backend/.sqlx/query-6f9386dfcb4c201525722aee3caa25bf2f3a35d90f7354c7d3aef8a3538a03a7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO native_trigger (\n external_id,\n workspace_id,\n service_name,\n script_path,\n is_flow,\n webhook_token_prefix,\n service_config\n ) VALUES (\n $1, $2, $3, $4, $5, $6, $7\n )\n ON CONFLICT (external_id, workspace_id, service_name)\n DO UPDATE SET script_path = $4, is_flow = $5, webhook_token_prefix = $6, service_config = $7, error = NULL, updated_at = NOW()\n ", + "query": "\n INSERT INTO native_trigger (\n external_id,\n workspace_id,\n service_name,\n script_path,\n is_flow,\n webhook_token_hash,\n service_config\n ) VALUES (\n $1, $2, $3, $4, $5, $6, $7\n )\n ON CONFLICT (external_id, workspace_id, service_name)\n DO UPDATE SET script_path = $4, is_flow = $5, webhook_token_hash = $6, service_config = $7, error = NULL, updated_at = NOW()\n ", "describe": { "columns": [], "parameters": { @@ -26,5 +26,5 @@ }, "nullable": [] }, - "hash": "023cdbc77ea9e2c17a1aa92a5b9001f29e58e81b3f782887db6e0a627dd8ad75" + "hash": "6f9386dfcb4c201525722aee3caa25bf2f3a35d90f7354c7d3aef8a3538a03a7" } diff --git a/backend/.sqlx/query-722f9da2b3ad1e1129928c52498b994db0dc1728945f90fd23b707ee355d0472.json b/backend/.sqlx/query-722f9da2b3ad1e1129928c52498b994db0dc1728945f90fd23b707ee355d0472.json new file mode 100644 index 0000000000000..b3f0d62ca8134 --- /dev/null +++ b/backend/.sqlx/query-722f9da2b3ad1e1129928c52498b994db0dc1728945f90fd23b707ee355d0472.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM token WHERE token_hash = $1 RETURNING email", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "email", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "722f9da2b3ad1e1129928c52498b994db0dc1728945f90fd23b707ee355d0472" +} diff --git 
a/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json b/backend/.sqlx/query-8065ed67770101e30eea456c1c682e1900d97721d931cea80a4fe240901b3604.json similarity index 57% rename from backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json rename to backend/.sqlx/query-8065ed67770101e30eea456c1c682e1900d97721d931cea80a4fe240901b3604.json index 4843d959c190c..fef6764ba94ab 100644 --- a/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json +++ b/backend/.sqlx/query-8065ed67770101e30eea456c1c682e1900d97721d931cea80a4fe240901b3604.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT true FROM token WHERE token = $1 and expiration IS NOT NULL and expiration > now() + $2::int * '1 sec'::interval", + "query": "SELECT true FROM token WHERE token_hash = $1 and expiration IS NOT NULL and expiration > now() + $2::int * '1 sec'::interval", "describe": { "columns": [ { @@ -19,5 +19,5 @@ null ] }, - "hash": "bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0" + "hash": "8065ed67770101e30eea456c1c682e1900d97721d931cea80a4fe240901b3604" } diff --git a/backend/.sqlx/query-8515537f73c132e62c4dafad1e8d8e56f0c70dccd0edbc9667f550b31ab54c18.json b/backend/.sqlx/query-8515537f73c132e62c4dafad1e8d8e56f0c70dccd0edbc9667f550b31ab54c18.json deleted file mode 100644 index b03708c8134cf..0000000000000 --- a/backend/.sqlx/query-8515537f73c132e62c4dafad1e8d8e56f0c70dccd0edbc9667f550b31ab54c18.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO token (token, email, label, super_admin, owner, workspace_id)\n VALUES ('CHARLIE_TOKEN_NEW', 'charlie@windmill.dev', 'Charlie new token', false, 'u/charlie', 'test-workspace')", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "8515537f73c132e62c4dafad1e8d8e56f0c70dccd0edbc9667f550b31ab54c18" -} diff --git 
a/backend/.sqlx/query-88b6a76134a822d4b706c361a7c71f5a0ab04cc4ac9f236ffd056d3acdb79711.json b/backend/.sqlx/query-88b6a76134a822d4b706c361a7c71f5a0ab04cc4ac9f236ffd056d3acdb79711.json new file mode 100644 index 0000000000000..f96090bad55c7 --- /dev/null +++ b/backend/.sqlx/query-88b6a76134a822d4b706c361a7c71f5a0ab04cc4ac9f236ffd056d3acdb79711.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token (token_hash, token_prefix, token, email, label, super_admin, scopes, workspace_id, owner, expiration)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Bool", + "TextArray", + "Varchar", + "Varchar", + "Timestamptz" + ] + }, + "nullable": [] + }, + "hash": "88b6a76134a822d4b706c361a7c71f5a0ab04cc4ac9f236ffd056d3acdb79711" +} diff --git a/backend/.sqlx/query-8aebd7f7fd1374f1c3d5389e953ebf080df3f76ac3e6e6373a89c8d46388125d.json b/backend/.sqlx/query-8aebd7f7fd1374f1c3d5389e953ebf080df3f76ac3e6e6373a89c8d46388125d.json deleted file mode 100644 index 90a2f78d2c90a..0000000000000 --- a/backend/.sqlx/query-8aebd7f7fd1374f1c3d5389e953ebf080df3f76ac3e6e6373a89c8d46388125d.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO token\n (token, email, label, expiration, super_admin)\n VALUES ($1, $2, $3, now() + ($4 || ' seconds')::interval, $5)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Varchar", - "Varchar", - "Varchar", - "Text", - "Bool" - ] - }, - "nullable": [] - }, - "hash": "8aebd7f7fd1374f1c3d5389e953ebf080df3f76ac3e6e6373a89c8d46388125d" -} diff --git a/backend/.sqlx/query-c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3.json b/backend/.sqlx/query-93aa569329a85799594606a4f77fe955820f7b2761df6b38a6a6615b518188f9.json similarity index 70% rename from backend/.sqlx/query-c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3.json 
rename to backend/.sqlx/query-93aa569329a85799594606a4f77fe955820f7b2761df6b38a6a6615b518188f9.json index a3d8c2502f908..c9faa982baff1 100644 --- a/backend/.sqlx/query-c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3.json +++ b/backend/.sqlx/query-93aa569329a85799594606a4f77fe955820f7b2761df6b38a6a6615b518188f9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "UPDATE token SET last_used_at = now() WHERE\n token = $1\n AND (expiration > NOW() OR expiration IS NULL)\n AND (workspace_id IS NULL OR workspace_id = $2)\n RETURNING owner, email, super_admin, scopes, label", + "query": "UPDATE token SET last_used_at = now() WHERE\n token_hash = $1\n AND (expiration > NOW() OR expiration IS NULL)\n AND (workspace_id IS NULL OR workspace_id = $2)\n RETURNING owner, email, super_admin, scopes, label", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "c7d595d2a12228c49359440ca3a9622f1de5f5ee4bbe5d2b23f6fdb6379cebf3" + "hash": "93aa569329a85799594606a4f77fe955820f7b2761df6b38a6a6615b518188f9" } diff --git a/backend/.sqlx/query-d7e9b69fef8369117ce057d01d87288b39ea7c802007f112eb3d62230d07abb6.json b/backend/.sqlx/query-94fd0a57cfc9341b2e9deae60506c6c06aa6934b87200da14231f12f65149cd3.json similarity index 62% rename from backend/.sqlx/query-d7e9b69fef8369117ce057d01d87288b39ea7c802007f112eb3d62230d07abb6.json rename to backend/.sqlx/query-94fd0a57cfc9341b2e9deae60506c6c06aa6934b87200da14231f12f65149cd3.json index 015aa7b05a6ca..d54260b4f720a 100644 --- a/backend/.sqlx/query-d7e9b69fef8369117ce057d01d87288b39ea7c802007f112eb3d62230d07abb6.json +++ b/backend/.sqlx/query-94fd0a57cfc9341b2e9deae60506c6c06aa6934b87200da14231f12f65149cd3.json @@ -1,12 +1,12 @@ { "db_name": "PostgreSQL", - "query": "DELETE FROM token_expiry_notification n\n USING token t\n WHERE n.token = t.token\n AND n.expiration > now()\n AND n.expiration <= now() + interval '7 days'\n RETURNING substring(t.token for 10) as token_prefix, t.label, t.email, t.workspace_id", + 
"query": "DELETE FROM token_expiry_notification n\n USING token t\n WHERE n.token_hash = t.token_hash\n AND n.expiration > now()\n AND n.expiration <= now() + interval '7 days'\n RETURNING t.token_prefix, t.label, t.email, t.workspace_id", "describe": { "columns": [ { "ordinal": 0, "name": "token_prefix", - "type_info": "Text" + "type_info": "Varchar" }, { "ordinal": 1, @@ -28,11 +28,11 @@ "Left": [] }, "nullable": [ - null, + false, true, true, true ] }, - "hash": "d7e9b69fef8369117ce057d01d87288b39ea7c802007f112eb3d62230d07abb6" + "hash": "94fd0a57cfc9341b2e9deae60506c6c06aa6934b87200da14231f12f65149cd3" } diff --git a/backend/.sqlx/query-2fd22c4ffa2d222bb116260994a748e0639c2f73cbc1d8be66420c70b14c96e1.json b/backend/.sqlx/query-95e77019bca83ce43b629e7aac429b09a60d732099a2de9e001d2b40a8e919a9.json similarity index 57% rename from backend/.sqlx/query-2fd22c4ffa2d222bb116260994a748e0639c2f73cbc1d8be66420c70b14c96e1.json rename to backend/.sqlx/query-95e77019bca83ce43b629e7aac429b09a60d732099a2de9e001d2b40a8e919a9.json index afd0f503bf87c..a8fbb473d8e6b 100644 --- a/backend/.sqlx/query-2fd22c4ffa2d222bb116260994a748e0639c2f73cbc1d8be66420c70b14c96e1.json +++ b/backend/.sqlx/query-95e77019bca83ce43b629e7aac429b09a60d732099a2de9e001d2b40a8e919a9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE native_trigger\n SET service_config = $1, updated_at = NOW()\n WHERE\n workspace_id = $2\n AND service_name = $3\n AND external_id = $4\n ", + "query": "\n UPDATE native_trigger\n SET service_config = $1,\n webhook_token_hash = COALESCE($5, webhook_token_hash),\n updated_at = NOW()\n WHERE\n workspace_id = $2\n AND service_name = $3\n AND external_id = $4\n ", "describe": { "columns": [], "parameters": { @@ -18,10 +18,11 @@ } } }, - "Text" + "Text", + "Varchar" ] }, "nullable": [] }, - "hash": "2fd22c4ffa2d222bb116260994a748e0639c2f73cbc1d8be66420c70b14c96e1" + "hash": "95e77019bca83ce43b629e7aac429b09a60d732099a2de9e001d2b40a8e919a9" } diff --git 
a/backend/.sqlx/query-983c21be4341a7ff9eb647041aa3642a89b16701e71d624c6adacb652e231a1a.json b/backend/.sqlx/query-983c21be4341a7ff9eb647041aa3642a89b16701e71d624c6adacb652e231a1a.json new file mode 100644 index 0000000000000..959438f58237d --- /dev/null +++ b/backend/.sqlx/query-983c21be4341a7ff9eb647041aa3642a89b16701e71d624c6adacb652e231a1a.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT email FROM token WHERE token_hash = $1 AND (expiration > NOW() OR expiration IS NULL)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "email", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "983c21be4341a7ff9eb647041aa3642a89b16701e71d624c6adacb652e231a1a" +} diff --git a/backend/.sqlx/query-98d929e1c12a41933f5cfbd79175c57375fdb528a30de49adbe5305fc237b2c5.json b/backend/.sqlx/query-98d929e1c12a41933f5cfbd79175c57375fdb528a30de49adbe5305fc237b2c5.json new file mode 100644 index 0000000000000..69cd7135025e8 --- /dev/null +++ b/backend/.sqlx/query-98d929e1c12a41933f5cfbd79175c57375fdb528a30de49adbe5305fc237b2c5.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token\n (token_hash, token_prefix, token, email, label, expiration, super_admin)\n VALUES ($1, $2, $3, $4, $5, $6, $7)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Timestamptz", + "Bool" + ] + }, + "nullable": [] + }, + "hash": "98d929e1c12a41933f5cfbd79175c57375fdb528a30de49adbe5305fc237b2c5" +} diff --git a/backend/.sqlx/query-9f86d16016ddbed5ff2a87c113a675a2a05eaf30237e21359c52f31bb1bddc73.json b/backend/.sqlx/query-9f86d16016ddbed5ff2a87c113a675a2a05eaf30237e21359c52f31bb1bddc73.json new file mode 100644 index 0000000000000..163dc2285b950 --- /dev/null +++ b/backend/.sqlx/query-9f86d16016ddbed5ff2a87c113a675a2a05eaf30237e21359c52f31bb1bddc73.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + 
"query": "INSERT INTO token\n (token_hash, token_prefix, token, email, label, expiration, super_admin, scopes, workspace_id)\n SELECT $1, $2, $3, $4, $5, $6, $7, $8, $9\n WHERE $9::varchar IS NULL OR NOT EXISTS(\n SELECT 1 FROM workspace WHERE id = $9 AND deleted = true\n )", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Timestamptz", + "Bool", + "TextArray", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "9f86d16016ddbed5ff2a87c113a675a2a05eaf30237e21359c52f31bb1bddc73" +} diff --git a/backend/.sqlx/query-ecab1af12a7afa685c056b9d0e526275203fc8ecddf83ca6d05c9fb77e46e7ee.json b/backend/.sqlx/query-a115d8ea786907561afdbbc07d11dc715d80b00c0e79b61b0057a3ae3886a85e.json similarity index 64% rename from backend/.sqlx/query-ecab1af12a7afa685c056b9d0e526275203fc8ecddf83ca6d05c9fb77e46e7ee.json rename to backend/.sqlx/query-a115d8ea786907561afdbbc07d11dc715d80b00c0e79b61b0057a3ae3886a85e.json index 16ccdd10e979b..660c855622b38 100644 --- a/backend/.sqlx/query-ecab1af12a7afa685c056b9d0e526275203fc8ecddf83ca6d05c9fb77e46e7ee.json +++ b/backend/.sqlx/query-a115d8ea786907561afdbbc07d11dc715d80b00c0e79b61b0057a3ae3886a85e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n nt.external_id,\n nt.workspace_id,\n nt.service_name AS \"service_name!: ServiceName\",\n nt.script_path,\n nt.is_flow,\n nt.webhook_token_prefix,\n nt.service_config,\n nt.error,\n nt.created_at,\n nt.updated_at\n FROM\n native_trigger nt\n WHERE\n nt.workspace_id = $1 AND\n nt.service_name = $2 AND\n ($5::text IS NULL OR nt.script_path = $5) AND\n ($6::bool IS NULL OR nt.is_flow = $6) AND\n (\n (nt.is_flow = false AND EXISTS (\n SELECT 1 FROM script s\n WHERE s.workspace_id = nt.workspace_id\n AND s.path = nt.script_path\n ))\n OR\n (nt.is_flow = true AND EXISTS (\n SELECT 1 FROM flow f\n WHERE f.workspace_id = nt.workspace_id\n AND f.path = nt.script_path\n ))\n )\n LIMIT $3\n OFFSET $4\n ", + 
"query": "\n SELECT\n nt.external_id,\n nt.workspace_id,\n nt.service_name AS \"service_name!: ServiceName\",\n nt.script_path,\n nt.is_flow,\n nt.webhook_token_hash,\n nt.service_config,\n nt.error,\n nt.created_at,\n nt.updated_at\n FROM\n native_trigger nt\n WHERE\n nt.workspace_id = $1 AND\n nt.service_name = $2 AND\n ($5::text IS NULL OR nt.script_path = $5) AND\n ($6::bool IS NULL OR nt.is_flow = $6) AND\n (\n (nt.is_flow = false AND EXISTS (\n SELECT 1 FROM script s\n WHERE s.workspace_id = nt.workspace_id\n AND s.path = nt.script_path\n ))\n OR\n (nt.is_flow = true AND EXISTS (\n SELECT 1 FROM flow f\n WHERE f.workspace_id = nt.workspace_id\n AND f.path = nt.script_path\n ))\n )\n LIMIT $3\n OFFSET $4\n ", "describe": { "columns": [ { @@ -40,7 +40,7 @@ }, { "ordinal": 5, - "name": "webhook_token_prefix", + "name": "webhook_token_hash", "type_info": "Varchar" }, { @@ -97,5 +97,5 @@ false ] }, - "hash": "ecab1af12a7afa685c056b9d0e526275203fc8ecddf83ca6d05c9fb77e46e7ee" + "hash": "a115d8ea786907561afdbbc07d11dc715d80b00c0e79b61b0057a3ae3886a85e" } diff --git a/backend/.sqlx/query-a4d973d0f1c293345ad2bfd2472da8d6a3b425ea0590a66f1db6692dd2ddb437.json b/backend/.sqlx/query-a4d973d0f1c293345ad2bfd2472da8d6a3b425ea0590a66f1db6692dd2ddb437.json deleted file mode 100644 index af35d619fa81b..0000000000000 --- a/backend/.sqlx/query-a4d973d0f1c293345ad2bfd2472da8d6a3b425ea0590a66f1db6692dd2ddb437.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO token_expiry_notification (token, expiration) VALUES ($1, $2) ON CONFLICT DO NOTHING", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Varchar", - "Timestamptz" - ] - }, - "nullable": [] - }, - "hash": "a4d973d0f1c293345ad2bfd2472da8d6a3b425ea0590a66f1db6692dd2ddb437" -} diff --git a/backend/.sqlx/query-b615d73ddb43e9d655b86a0cf98f892bf40e629ee11ee4845199481755f2789d.json b/backend/.sqlx/query-bac545933a627a62b7845d8aab80702443285e4d1d11e5a0f4cd2a3d4add51bb.json 
similarity index 82% rename from backend/.sqlx/query-b615d73ddb43e9d655b86a0cf98f892bf40e629ee11ee4845199481755f2789d.json rename to backend/.sqlx/query-bac545933a627a62b7845d8aab80702443285e4d1d11e5a0f4cd2a3d4add51bb.json index e6ef13f492222..a90b2b839860d 100644 --- a/backend/.sqlx/query-b615d73ddb43e9d655b86a0cf98f892bf40e629ee11ee4845199481755f2789d.json +++ b/backend/.sqlx/query-bac545933a627a62b7845d8aab80702443285e4d1d11e5a0f4cd2a3d4add51bb.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n external_id,\n workspace_id,\n service_name AS \"service_name!: ServiceName\",\n script_path,\n is_flow,\n webhook_token_prefix,\n service_config,\n error,\n created_at,\n updated_at\n FROM\n native_trigger\n WHERE\n workspace_id = $1\n AND service_name = $2\n AND external_id = $3\n ", + "query": "\n SELECT\n external_id,\n workspace_id,\n service_name AS \"service_name!: ServiceName\",\n script_path,\n is_flow,\n webhook_token_hash,\n service_config,\n error,\n created_at,\n updated_at\n FROM\n native_trigger\n WHERE\n workspace_id = $1\n AND service_name = $2\n AND external_id = $3\n ", "describe": { "columns": [ { @@ -40,7 +40,7 @@ }, { "ordinal": 5, - "name": "webhook_token_prefix", + "name": "webhook_token_hash", "type_info": "Varchar" }, { @@ -94,5 +94,5 @@ false ] }, - "hash": "b615d73ddb43e9d655b86a0cf98f892bf40e629ee11ee4845199481755f2789d" + "hash": "bac545933a627a62b7845d8aab80702443285e4d1d11e5a0f4cd2a3d4add51bb" } diff --git a/backend/.sqlx/query-5c9ed4d8d16c77c0c6b42e9ee211168573162745060788fbca188ed405c423cd.json b/backend/.sqlx/query-c2efefded4eaea858c41c32ef20e2c11ed88327cf033e1abfd7c0458b71f53da.json similarity index 86% rename from backend/.sqlx/query-5c9ed4d8d16c77c0c6b42e9ee211168573162745060788fbca188ed405c423cd.json rename to backend/.sqlx/query-c2efefded4eaea858c41c32ef20e2c11ed88327cf033e1abfd7c0458b71f53da.json index 63478d0e66b58..ee946012b270c 100644 --- 
a/backend/.sqlx/query-5c9ed4d8d16c77c0c6b42e9ee211168573162745060788fbca188ed405c423cd.json +++ b/backend/.sqlx/query-c2efefded4eaea858c41c32ef20e2c11ed88327cf033e1abfd7c0458b71f53da.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "UPDATE mcp_oauth_refresh_token\n SET used_at = now()\n WHERE refresh_token = $1\n AND client_id = $2\n AND used_at IS NULL\n AND NOT revoked\n AND expires_at > now()\n RETURNING id, refresh_token, access_token, client_id, user_email, workspace_id,\n scopes, token_family, created_at, expires_at, used_at, revoked", + "query": "UPDATE mcp_oauth_refresh_token\n SET used_at = now()\n WHERE refresh_token = $1\n AND client_id = $2\n AND used_at IS NULL\n AND NOT revoked\n AND expires_at > now()\n RETURNING id, refresh_token, access_token_hash, client_id, user_email, workspace_id,\n scopes, token_family, created_at, expires_at, used_at, revoked", "describe": { "columns": [ { @@ -15,7 +15,7 @@ }, { "ordinal": 2, - "name": "access_token", + "name": "access_token_hash", "type_info": "Varchar" }, { @@ -85,5 +85,5 @@ false ] }, - "hash": "5c9ed4d8d16c77c0c6b42e9ee211168573162745060788fbca188ed405c423cd" + "hash": "c2efefded4eaea858c41c32ef20e2c11ed88327cf033e1abfd7c0458b71f53da" } diff --git a/backend/.sqlx/query-55cf43cb9219b43f8e9f94b23b62846cd0b1ef5f64d20b0d975d0058730f427b.json b/backend/.sqlx/query-ca8997323e27f99bfc5ad8c4a54224d43eaab99b9f1b7d55eff25b0225bb1504.json similarity index 66% rename from backend/.sqlx/query-55cf43cb9219b43f8e9f94b23b62846cd0b1ef5f64d20b0d975d0058730f427b.json rename to backend/.sqlx/query-ca8997323e27f99bfc5ad8c4a54224d43eaab99b9f1b7d55eff25b0225bb1504.json index e68f25d6abd8a..86175b0dc454e 100644 --- a/backend/.sqlx/query-55cf43cb9219b43f8e9f94b23b62846cd0b1ef5f64d20b0d975d0058730f427b.json +++ b/backend/.sqlx/query-ca8997323e27f99bfc5ad8c4a54224d43eaab99b9f1b7d55eff25b0225bb1504.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT job FROM token WHERE token = $1", + "query": "SELECT job 
FROM token WHERE token_hash = $1", "describe": { "columns": [ { @@ -18,5 +18,5 @@ true ] }, - "hash": "55cf43cb9219b43f8e9f94b23b62846cd0b1ef5f64d20b0d975d0058730f427b" + "hash": "ca8997323e27f99bfc5ad8c4a54224d43eaab99b9f1b7d55eff25b0225bb1504" } diff --git a/backend/.sqlx/query-d05f20431cd08f737bfbf904efedfdf104e3d77b0725c5355305d19f67359e90.json b/backend/.sqlx/query-d05f20431cd08f737bfbf904efedfdf104e3d77b0725c5355305d19f67359e90.json new file mode 100644 index 0000000000000..e778c17bf6295 --- /dev/null +++ b/backend/.sqlx/query-d05f20431cd08f737bfbf904efedfdf104e3d77b0725c5355305d19f67359e90.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token\n (token_hash, token_prefix, token, label, super_admin, email)\n VALUES ($1, $2, $3, $4, $5, $6)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Bool", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "d05f20431cd08f737bfbf904efedfdf104e3d77b0725c5355305d19f67359e90" +} diff --git a/backend/.sqlx/query-d32448f6b329cf98dad42b218a630c0cf40a99edb4ae9fe3e9be485ab1077b3a.json b/backend/.sqlx/query-d32448f6b329cf98dad42b218a630c0cf40a99edb4ae9fe3e9be485ab1077b3a.json deleted file mode 100644 index 73ffec7e48d17..0000000000000 --- a/backend/.sqlx/query-d32448f6b329cf98dad42b218a630c0cf40a99edb4ae9fe3e9be485ab1077b3a.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO token (token, email, label, expiration, scopes, workspace_id)\n SELECT $1::varchar, $2::varchar, $3::varchar, now() + ($4 || ' seconds')::interval, $5::text[], $6::varchar\n WHERE NOT EXISTS(SELECT 1 FROM workspace WHERE id = $6 AND deleted = true)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Varchar", - "Varchar", - "Varchar", - "Text", - "TextArray", - "Varchar" - ] - }, - "nullable": [] - }, - "hash": "d32448f6b329cf98dad42b218a630c0cf40a99edb4ae9fe3e9be485ab1077b3a" -} diff --git 
a/backend/.sqlx/query-97e364c703bdcdfb5345810659cbe0477a28b8199ef0b297f9a22c88a43b6b5c.json b/backend/.sqlx/query-db2d2f67c785f790a1a2bd7181a69945b6baeb1f1e9e36c9949b9d5fe1f78431.json similarity index 65% rename from backend/.sqlx/query-97e364c703bdcdfb5345810659cbe0477a28b8199ef0b297f9a22c88a43b6b5c.json rename to backend/.sqlx/query-db2d2f67c785f790a1a2bd7181a69945b6baeb1f1e9e36c9949b9d5fe1f78431.json index 34ff650dafdb4..eadb48cbb564c 100644 --- a/backend/.sqlx/query-97e364c703bdcdfb5345810659cbe0477a28b8199ef0b297f9a22c88a43b6b5c.json +++ b/backend/.sqlx/query-db2d2f67c785f790a1a2bd7181a69945b6baeb1f1e9e36c9949b9d5fe1f78431.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "DELETE FROM token WHERE token = $1 RETURNING email", + "query": "SELECT email FROM token WHERE token = $1", "describe": { "columns": [ { @@ -18,5 +18,5 @@ true ] }, - "hash": "97e364c703bdcdfb5345810659cbe0477a28b8199ef0b297f9a22c88a43b6b5c" + "hash": "db2d2f67c785f790a1a2bd7181a69945b6baeb1f1e9e36c9949b9d5fe1f78431" } diff --git a/backend/.sqlx/query-2c231a2cd267d8d6d28a22d166a50cc6b4df813a15c613eb1960eff202c517f8.json b/backend/.sqlx/query-dd8c63ac04e33e2863ff3712fc6a5209e1ff2c235df1e39ddc3dd21f60f66ef4.json similarity index 57% rename from backend/.sqlx/query-2c231a2cd267d8d6d28a22d166a50cc6b4df813a15c613eb1960eff202c517f8.json rename to backend/.sqlx/query-dd8c63ac04e33e2863ff3712fc6a5209e1ff2c235df1e39ddc3dd21f60f66ef4.json index 32e0372293ae5..37055880aeeae 100644 --- a/backend/.sqlx/query-2c231a2cd267d8d6d28a22d166a50cc6b4df813a15c613eb1960eff202c517f8.json +++ b/backend/.sqlx/query-dd8c63ac04e33e2863ff3712fc6a5209e1ff2c235df1e39ddc3dd21f60f66ef4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO mcp_oauth_refresh_token\n (refresh_token, access_token, client_id, user_email, workspace_id, scopes, token_family, expires_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, now() + ($8 || ' seconds')::interval)", + "query": "INSERT INTO mcp_oauth_refresh_token\n 
(refresh_token, access_token_hash, client_id, user_email, workspace_id, scopes, token_family, expires_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, now() + ($8 || ' seconds')::interval)", "describe": { "columns": [], "parameters": { @@ -17,5 +17,5 @@ }, "nullable": [] }, - "hash": "2c231a2cd267d8d6d28a22d166a50cc6b4df813a15c613eb1960eff202c517f8" + "hash": "dd8c63ac04e33e2863ff3712fc6a5209e1ff2c235df1e39ddc3dd21f60f66ef4" } diff --git a/backend/.sqlx/query-e33be0991702ae3a295db7defc6d19d914307a95d72bb0fb447e5b367d52f6a0.json b/backend/.sqlx/query-e33be0991702ae3a295db7defc6d19d914307a95d72bb0fb447e5b367d52f6a0.json deleted file mode 100644 index 14757923d1766..0000000000000 --- a/backend/.sqlx/query-e33be0991702ae3a295db7defc6d19d914307a95d72bb0fb447e5b367d52f6a0.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO token\n (token, email, label, expiration, super_admin, scopes, workspace_id)\n SELECT $1, $2, $3, $4, $5, $6, $7\n WHERE $7::varchar IS NULL OR NOT EXISTS(\n SELECT 1 FROM workspace WHERE id = $7 AND deleted = true\n )", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Varchar", - "Varchar", - "Varchar", - "Timestamptz", - "Bool", - "TextArray", - "Varchar" - ] - }, - "nullable": [] - }, - "hash": "e33be0991702ae3a295db7defc6d19d914307a95d72bb0fb447e5b367d52f6a0" -} diff --git a/backend/.sqlx/query-e4b5ea8c2a5644471c103463e79030140350134ed4f42478daba17655802f238.json b/backend/.sqlx/query-e4b5ea8c2a5644471c103463e79030140350134ed4f42478daba17655802f238.json new file mode 100644 index 0000000000000..f4e8f0106c1e6 --- /dev/null +++ b/backend/.sqlx/query-e4b5ea8c2a5644471c103463e79030140350134ed4f42478daba17655802f238.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO token_expiry_notification (token_hash, expiration) VALUES ($1, $2) ON CONFLICT DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Timestamptz" + ] + }, + "nullable": [] + }, + 
"hash": "e4b5ea8c2a5644471c103463e79030140350134ed4f42478daba17655802f238" +} diff --git a/backend/.sqlx/query-eba16eb819e2644284fb073c891706d78a6f24cb0e614d7d81ba1b643805bf06.json b/backend/.sqlx/query-eba16eb819e2644284fb073c891706d78a6f24cb0e614d7d81ba1b643805bf06.json deleted file mode 100644 index c96961eac49f3..0000000000000 --- a/backend/.sqlx/query-eba16eb819e2644284fb073c891706d78a6f24cb0e614d7d81ba1b643805bf06.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT token as \"token!\"\n FROM token\n WHERE token LIKE concat($1::text, '%')\n LIMIT 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "token!", - "type_info": "Varchar" - } - ], - "parameters": { - "Left": [ - "Text" - ] - }, - "nullable": [ - false - ] - }, - "hash": "eba16eb819e2644284fb073c891706d78a6f24cb0e614d7d81ba1b643805bf06" -} diff --git a/backend/.sqlx/query-3b746f73abbaea3570b9c79af21d4d0f60232098d69b71c21fd3da985f7a5905.json b/backend/.sqlx/query-ebc2eed287f93e184ed683feb20432caa6e6682620c90f38b29dd32b9a8fe633.json similarity index 72% rename from backend/.sqlx/query-3b746f73abbaea3570b9c79af21d4d0f60232098d69b71c21fd3da985f7a5905.json rename to backend/.sqlx/query-ebc2eed287f93e184ed683feb20432caa6e6682620c90f38b29dd32b9a8fe633.json index 3f0749f7bfa3e..657c6602284fd 100644 --- a/backend/.sqlx/query-3b746f73abbaea3570b9c79af21d4d0f60232098d69b71c21fd3da985f7a5905.json +++ b/backend/.sqlx/query-ebc2eed287f93e184ed683feb20432caa6e6682620c90f38b29dd32b9a8fe633.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT label, concat(substring(token for 10)) as token_prefix, expiration, created_at, last_used_at, scopes FROM token WHERE email = $1\n ORDER BY created_at DESC LIMIT $2 OFFSET $3", + "query": "SELECT label, token_prefix, expiration, created_at, last_used_at, scopes FROM token WHERE email = $1\n ORDER BY created_at DESC LIMIT $2 OFFSET $3", "describe": { "columns": [ { @@ -11,7 +11,7 @@ { "ordinal": 1, "name": 
"token_prefix", - "type_info": "Text" + "type_info": "Varchar" }, { "ordinal": 2, @@ -43,12 +43,12 @@ }, "nullable": [ true, - null, + false, true, false, false, true ] }, - "hash": "3b746f73abbaea3570b9c79af21d4d0f60232098d69b71c21fd3da985f7a5905" + "hash": "ebc2eed287f93e184ed683feb20432caa6e6682620c90f38b29dd32b9a8fe633" } diff --git a/backend/ee-repo-ref.txt b/backend/ee-repo-ref.txt index fd93c7df8888c..4615ab88106b6 100644 --- a/backend/ee-repo-ref.txt +++ b/backend/ee-repo-ref.txt @@ -1 +1 @@ -04689be519d724e437b64c2ec86d47b3270c9be8 +2d0823a471014e2bc2d898c63518323946b7474f diff --git a/backend/migrations/20260316000000_token_hash.down.sql b/backend/migrations/20260316000000_token_hash.down.sql new file mode 100644 index 0000000000000..02725da65d74d --- /dev/null +++ b/backend/migrations/20260316000000_token_hash.down.sql @@ -0,0 +1,8 @@ +-- Reverse of step 1: drop indexes and columns + +DROP INDEX IF EXISTS idx_token_plaintext; +DROP INDEX IF EXISTS idx_token_prefix; +DROP INDEX IF EXISTS token_hash_unique; + +ALTER TABLE token DROP COLUMN token_hash; +ALTER TABLE token DROP COLUMN token_prefix; diff --git a/backend/migrations/20260316000000_token_hash.up.sql b/backend/migrations/20260316000000_token_hash.up.sql new file mode 100644 index 0000000000000..c0c3bb7375211 --- /dev/null +++ b/backend/migrations/20260316000000_token_hash.up.sql @@ -0,0 +1,31 @@ +-- Step 1: Add columns, backfill, build indexes. +-- This migration does the heavy work but avoids ACCESS EXCLUSIVE during index build +-- by creating the unique index first, then using it for the PK swap in the next migration. + +-- Add new columns (instant metadata change) +ALTER TABLE token ADD COLUMN token_hash VARCHAR(64); +ALTER TABLE token ADD COLUMN token_prefix VARCHAR(10); + +-- Backfill existing tokens using built-in sha256() (no extension needed). +-- Takes ROW EXCLUSIVE lock — concurrent reads and non-token writes proceed normally. 
+UPDATE token +SET token_hash = encode(sha256(token::bytea), 'hex'), + token_prefix = substring(token for 10) +WHERE token_hash IS NULL; + +-- Mark NOT NULL (instant on PG 12+ when all rows already satisfy the constraint) +ALTER TABLE token ALTER COLUMN token_hash SET NOT NULL; +ALTER TABLE token ALTER COLUMN token_prefix SET NOT NULL; + +-- Build the unique index that the next migration will promote to PK. +-- Takes SHARE lock (reads OK, writes wait) but only for the duration of the build, +-- which is fast since token tables are typically small. +CREATE UNIQUE INDEX token_hash_unique ON token (token_hash); + +-- Index on prefix for deletion/listing +CREATE INDEX idx_token_prefix ON token (token_prefix); + +-- Keep old workers fast during rolling upgrades: they query WHERE token = $1 +-- after the PK swap drops the old primary key index on token. +-- Can be dropped once all workers are past MIN_VERSION_SUPPORTS_TOKEN_HASH. +CREATE INDEX idx_token_plaintext ON token (token) WHERE token IS NOT NULL; diff --git a/backend/migrations/20260316000001_token_hash_pk_swap.down.sql b/backend/migrations/20260316000001_token_hash_pk_swap.down.sql new file mode 100644 index 0000000000000..ab938e3422fec --- /dev/null +++ b/backend/migrations/20260316000001_token_hash_pk_swap.down.sql @@ -0,0 +1,26 @@ +-- Reverse step 2: restore old PK and trigger + +-- Restore the original trigger +CREATE OR REPLACE FUNCTION notify_token_invalidation() +RETURNS TRIGGER AS $$ +BEGIN + IF OLD.label = 'session' AND OLD.email IS NOT NULL THEN + INSERT INTO notify_event (channel, payload) + VALUES ('notify_token_invalidation', OLD.token); + END IF; + RETURN OLD; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Delete tokens created after migration that have no plaintext (cannot be restored) +DELETE FROM token WHERE token IS NULL; + +-- Make token NOT NULL again +ALTER TABLE token ALTER COLUMN token SET NOT NULL; + +-- Swap PK back: drop token_hash PK, restore token PK +ALTER TABLE token DROP 
CONSTRAINT token_pkey; +ALTER TABLE token ADD PRIMARY KEY (token); + +-- Re-create the unique index on token_hash (was consumed by ADD CONSTRAINT ... USING INDEX) +CREATE UNIQUE INDEX token_hash_unique ON token (token_hash); diff --git a/backend/migrations/20260316000001_token_hash_pk_swap.up.sql b/backend/migrations/20260316000001_token_hash_pk_swap.up.sql new file mode 100644 index 0000000000000..38e93fb3bad28 --- /dev/null +++ b/backend/migrations/20260316000001_token_hash_pk_swap.up.sql @@ -0,0 +1,22 @@ +-- Step 2: Swap PK and update trigger. +-- All operations here are instant metadata changes (no data/index rebuild). +-- The ACCESS EXCLUSIVE lock is held for only milliseconds. + +-- Swap primary key: drop old, promote existing unique index (instant) +ALTER TABLE token DROP CONSTRAINT token_pkey; +ALTER TABLE token ADD CONSTRAINT token_pkey PRIMARY KEY USING INDEX token_hash_unique; + +-- Make old token column nullable (no longer written for new tokens) +ALTER TABLE token ALTER COLUMN token DROP NOT NULL; + +-- Update the cache invalidation trigger to send prefix instead of plaintext +CREATE OR REPLACE FUNCTION notify_token_invalidation() +RETURNS TRIGGER AS $$ +BEGIN + IF OLD.label = 'session' AND OLD.email IS NOT NULL THEN + INSERT INTO notify_event (channel, payload) + VALUES ('notify_token_invalidation', OLD.token_prefix); + END IF; + RETURN OLD; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; diff --git a/backend/migrations/20260316000002_native_trigger_token_hash.down.sql b/backend/migrations/20260316000002_native_trigger_token_hash.down.sql new file mode 100644 index 0000000000000..c45ef7fd01635 --- /dev/null +++ b/backend/migrations/20260316000002_native_trigger_token_hash.down.sql @@ -0,0 +1,9 @@ +ALTER TABLE native_trigger ADD COLUMN webhook_token_prefix VARCHAR(10) NOT NULL DEFAULT ''; + +-- Backfill prefix from the token table +UPDATE native_trigger nt +SET webhook_token_prefix = t.token_prefix +FROM token t +WHERE t.token_hash = 
nt.webhook_token_hash; + +ALTER TABLE native_trigger DROP COLUMN IF EXISTS webhook_token_hash; diff --git a/backend/migrations/20260316000002_native_trigger_token_hash.up.sql b/backend/migrations/20260316000002_native_trigger_token_hash.up.sql new file mode 100644 index 0000000000000..fd643ebaa1c9b --- /dev/null +++ b/backend/migrations/20260316000002_native_trigger_token_hash.up.sql @@ -0,0 +1,22 @@ +-- Add webhook_token_hash to native_trigger for safe token lookups/deletes, +-- and drop webhook_token_prefix which is no longer needed. + +ALTER TABLE native_trigger ADD COLUMN webhook_token_hash VARCHAR(64); + +-- Backfill from the token table +UPDATE native_trigger nt +SET webhook_token_hash = t.token_hash +FROM token t +WHERE t.token_prefix = nt.webhook_token_prefix; + +-- Mark orphaned triggers (whose tokens no longer exist) with an error +-- instead of deleting them, so they remain visible in the UI. +-- Use a placeholder hash (sha256 of empty string) that won't match any real token. +UPDATE native_trigger +SET webhook_token_hash = encode(sha256(''::bytea), 'hex'), + error = 'Webhook token not found during migration — re-create this trigger to fix' +WHERE webhook_token_hash IS NULL; + +ALTER TABLE native_trigger ALTER COLUMN webhook_token_hash SET NOT NULL; + +ALTER TABLE native_trigger DROP COLUMN webhook_token_prefix; diff --git a/backend/migrations/20260316000003_rename_token_expiry_notification_column.down.sql b/backend/migrations/20260316000003_rename_token_expiry_notification_column.down.sql new file mode 100644 index 0000000000000..1b4895e88fa2a --- /dev/null +++ b/backend/migrations/20260316000003_rename_token_expiry_notification_column.down.sql @@ -0,0 +1,5 @@ +-- Hashed values cannot be reversed; truncate to avoid silent join mismatches +-- with the old code that compared plaintext token_expiry_notification.token against token.token. 
+TRUNCATE token_expiry_notification; + +ALTER TABLE token_expiry_notification RENAME COLUMN token_hash TO token; diff --git a/backend/migrations/20260316000003_rename_token_expiry_notification_column.up.sql b/backend/migrations/20260316000003_rename_token_expiry_notification_column.up.sql new file mode 100644 index 0000000000000..fd4186e725532 --- /dev/null +++ b/backend/migrations/20260316000003_rename_token_expiry_notification_column.up.sql @@ -0,0 +1,5 @@ +-- Convert existing plaintext token values to SHA-256 hashes, then rename the column. +UPDATE token_expiry_notification +SET token = encode(sha256(token::bytea), 'hex'); + +ALTER TABLE token_expiry_notification RENAME COLUMN token TO token_hash; diff --git a/backend/migrations/20260316000004_mcp_oauth_rename_access_token_hash.down.sql b/backend/migrations/20260316000004_mcp_oauth_rename_access_token_hash.down.sql new file mode 100644 index 0000000000000..9e3c94eea1de9 --- /dev/null +++ b/backend/migrations/20260316000004_mcp_oauth_rename_access_token_hash.down.sql @@ -0,0 +1,4 @@ +-- Hashing is irreversible so the values will be stale hashes, but the old +-- code's DELETE FROM token WHERE token = $1 is non-fatal — refresh tokens +-- keep working, only old access token cleanup silently fails. +ALTER TABLE mcp_oauth_refresh_token RENAME COLUMN access_token_hash TO access_token; diff --git a/backend/migrations/20260316000004_mcp_oauth_rename_access_token_hash.up.sql b/backend/migrations/20260316000004_mcp_oauth_rename_access_token_hash.up.sql new file mode 100644 index 0000000000000..e845eb6527de9 --- /dev/null +++ b/backend/migrations/20260316000004_mcp_oauth_rename_access_token_hash.up.sql @@ -0,0 +1,6 @@ +-- Hash existing plaintext access_token values in mcp_oauth_refresh_token. +-- All rows are rewritten unconditionally; the migration runs exactly once, so no re-run guard is needed.
+UPDATE mcp_oauth_refresh_token +SET access_token = encode(sha256(access_token::bytea), 'hex'); + +ALTER TABLE mcp_oauth_refresh_token RENAME COLUMN access_token TO access_token_hash; diff --git a/backend/parsers/windmill-parser-py-imports/tests/fixtures/base.sql b/backend/parsers/windmill-parser-py-imports/tests/fixtures/base.sql index 4b3df785342f9..30629d969df5b 100644 --- a/backend/parsers/windmill-parser-py-imports/tests/fixtures/base.sql +++ b/backend/parsers/windmill-parser-py-imports/tests/fixtures/base.sql @@ -803,7 +803,9 @@ ALTER TABLE public.script OWNER TO postgres; -- CREATE TABLE public.token ( - token character varying(50) NOT NULL, + token_hash character varying(64) NOT NULL, + token_prefix character varying(10) NOT NULL, + token character varying(50), label character varying(50), expiration timestamp with time zone, workspace_id character varying(50), @@ -1209,7 +1211,7 @@ ALTER TABLE ONLY public.script -- ALTER TABLE ONLY public.token - ADD CONSTRAINT token_pkey PRIMARY KEY (token); + ADD CONSTRAINT token_pkey PRIMARY KEY (token_hash); -- @@ -2534,7 +2536,7 @@ INSERT INTO public.usr(workspace_id, email, username, is_admin, role) VALUES INSERT INTO public.workspace_key(workspace_id, kind, key) VALUES ('test-workspace', 'cloud', 'test-key'); -insert INTO public.token(token, email, label, super_admin) VALUES ('SECRET_TOKEN', 'test@windmill.dev', 'test token', true); +insert INTO public.token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN', 'test@windmill.dev', 'test token', true); INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( 'test-workspace', diff --git a/backend/src/main.rs b/backend/src/main.rs index aa9f16af8953c..24d945993705c 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -1686,7 +1686,7 @@ async fn process_notify_event( } "notify_token_invalidation" => { 
tracing::info!( - "Token invalidation detected for token: {}...", + "Token invalidation detected for prefix: {}...", payload.get(..8).unwrap_or(payload) ); windmill_api::auth::invalidate_token_from_cache(payload); diff --git a/backend/src/monitor.rs b/backend/src/monitor.rs index 803afabf0d800..429577f3ef91a 100644 --- a/backend/src/monitor.rs +++ b/backend/src/monitor.rs @@ -945,7 +945,7 @@ pub async fn delete_expired_items(db: &DB) -> () { let expired_tokens_r = sqlx::query_as!( TokenRow, "DELETE FROM token WHERE expiration <= now() - RETURNING substring(token for 10) as token_prefix, label, email, workspace_id", + RETURNING token_prefix, label, email, workspace_id", ) .fetch_all(db) .await; @@ -1164,15 +1164,17 @@ pub async fn delete_expired_items(db: &DB) -> () { } pub async fn check_expiring_tokens(db: &DB) { - // Find tokens expiring within 7 days that still have a pending notification row + // Find tokens expiring within 7 days that still have a pending notification row. + // The notification table stores token_hash (not plaintext) so the join works + // even after the hash migration makes token.token nullable. 
let expiring_tokens_r = sqlx::query_as!( TokenRow, "DELETE FROM token_expiry_notification n USING token t - WHERE n.token = t.token + WHERE n.token_hash = t.token_hash AND n.expiration > now() AND n.expiration <= now() + interval '7 days' - RETURNING substring(t.token for 10) as token_prefix, t.label, t.email, t.workspace_id", + RETURNING t.token_prefix, t.label, t.email, t.workspace_id", ) .fetch_all(db) .await; diff --git a/backend/summarized_schema.txt b/backend/summarized_schema.txt index 2f86dcb322341..5a272d5121884 100644 --- a/backend/summarized_schema.txt +++ b/backend/summarized_schema.txt @@ -115,14 +115,14 @@ kafka_trigger: path(char), kafka_resource_path(char), topics(char), group_id(cha log_file: hostname(char), log_ts(ts), ok_lines(bigint), err_lines(bigint), mode(log_mode), worker_group(char), file_path(char), json_fmt(bool) magic_link: email(char), token(char), expiration(ts) mcp_oauth_client: mcp_server_url(text), client_id(text), client_secret(text), client_secret_expires_at(ts), token_endpoint(text), created_at(ts) -mcp_oauth_refresh_token: id(bigint), refresh_token(char), access_token(char), client_id(char), user_email(char), workspace_id(char), scopes(text[]), token_family(uuid), created_at(ts), expires_at(ts), used_at(ts), revoked(bool) +mcp_oauth_refresh_token: id(bigint), refresh_token(char), access_token_hash(char), client_id(char), user_email(char), workspace_id(char), scopes(text[]), token_family(uuid), created_at(ts), expires_at(ts), used_at(ts), revoked(bool) FK: (client_id) -> mcp_oauth_server_client(client_id) mcp_oauth_server_client: client_id(char), client_name(char), redirect_uris(text[]), created_at(ts) mcp_oauth_server_code: code(char), client_id(char), user_email(char), workspace_id(char), scopes(text[]), redirect_uri(text), code_challenge(char), code_challenge_method(char), created_at(ts), expires_at(ts) FK: (client_id) -> mcp_oauth_server_client(client_id) metrics: id(char), value(jsonb), created_at(ts) mqtt_trigger: 
mqtt_resource_path(char), subscribe_topics(jsonb[]), client_version(mqtt_client_version), v5_config(jsonb), v3_config(jsonb), client_id(char), path(char), script_path(char), is_flow(bool), workspace_id(char), edited_by(char), email(char), edited_at(ts), extra_perms(jsonb), server_id(char), last_server_ping(ts), error(text), error_handler_path(char), error_handler_args(jsonb), retry(jsonb), mode(trigger_mode) -native_trigger: external_id(char), workspace_id(char), service_name(native_trigger_service), script_path(char), is_flow(bool), webhook_token_prefix(char), service_config(jsonb), error(text), created_at(ts), updated_at(ts) +native_trigger: external_id(char), workspace_id(char), service_name(native_trigger_service), script_path(char), is_flow(bool), webhook_token_hash(char), service_config(jsonb), error(text), created_at(ts), updated_at(ts) FK: (workspace_id) -> workspace(id) nats_trigger: path(char), nats_resource_path(char), subjects(char), stream_name(char), consumer_name(char), use_jetstream(bool), script_path(char), is_flow(bool), workspace_id(char), edited_by(char), email(char), edited_at(ts), extra_perms(jsonb), server_id(char), last_server_ping(ts), error(text), error_handler_path(char), error_handler_args(jsonb), retry(jsonb), mode(trigger_mode) FK: (workspace_id) -> workspace(id) @@ -151,9 +151,9 @@ script: workspace_id(char), hash(bigint), path(char), parent_hashes(bigint[]), s skip_workspace_diff_tally: workspace_id(char), added_at(ts) sqs_trigger: path(char), queue_url(char), aws_resource_path(char), message_attributes(text[]), script_path(char), is_flow(bool), workspace_id(char), edited_by(char), email(char), edited_at(ts), extra_perms(jsonb), error(text), server_id(char), last_server_ping(ts), aws_auth_resource_type(aws_auth_resource_type), error_handler_path(char), error_handler_args(jsonb), retry(jsonb), mode(trigger_mode) FK: (workspace_id) -> workspace(id) -token: token(char), label(char), expiration(ts), workspace_id(char), owner(char), 
email(char), super_admin(bool), created_at(ts), last_used_at(ts), scopes(text[]), job(uuid) +token: token_hash(char), token_prefix(char), token(char), label(char), expiration(ts), workspace_id(char), owner(char), email(char), super_admin(bool), created_at(ts), last_used_at(ts), scopes(text[]), job(uuid) FK: (workspace_id) -> workspace(id) -token_expiry_notification: token(char), expiration(ts) +token_expiry_notification: token_hash(char), expiration(ts) INDEX: idx_token_expiry_notification_expiration (expiration) tutorial_progress: email(char), progress(bit64), skipped_all(bool) unique_ext_jwt_token: jwt_hash(bigint), last_used_at(ts) diff --git a/backend/tests/fixtures/base.sql b/backend/tests/fixtures/base.sql index 7db9918fba0c1..ce5df2640eb5d 100644 --- a/backend/tests/fixtures/base.sql +++ b/backend/tests/fixtures/base.sql @@ -33,9 +33,11 @@ INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES ('test-workspace', 'test3@windmill.dev', 'test-user-3', false, 'User'); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN', 'test@windmill.dev', 'test token', true); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); +-- NOTE: plaintext `token` column is included for backward compat during transition. +-- Remove it once the `token` column is dropped from the schema. 
+insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN', 'test@windmill.dev', 'test token', true); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN_2'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN_3'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_admin; GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_user; diff --git a/backend/tests/fixtures/end_user_email.sql b/backend/tests/fixtures/end_user_email.sql index 654ad93680e8b..17d459f7dedd2 100644 --- a/backend/tests/fixtures/end_user_email.sql +++ b/backend/tests/fixtures/end_user_email.sql @@ -24,15 +24,17 @@ VALUES ('other-ws@windmill.dev', 'hash', 'password', false, true, 'Other WS User INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES ('other-workspace', 'other-ws@windmill.dev', 'other-ws-user', true, 'Admin'); -INSERT INTO token(token, email, label, super_admin) -VALUES ('OTHER_WS_TOKEN', 'other-ws@windmill.dev', 'other ws token', false); +-- NOTE: plaintext `token` column is included for backward compat during transition. +-- Remove it once the `token` column is dropped from the schema. 
+INSERT INTO token(token_hash, token_prefix, token, email, label, super_admin) +VALUES (encode(sha256('OTHER_WS_TOKEN'::bytea), 'hex'), 'OTHER_WS_T', 'OTHER_WS_TOKEN', 'other-ws@windmill.dev', 'other ws token', false); -- User not in any workspace INSERT INTO password(email, password_hash, login_type, super_admin, verified, name) VALUES ('no-ws@windmill.dev', 'hash', 'password', false, true, 'No WS User'); -INSERT INTO token(token, email, label, super_admin) -VALUES ('NO_WS_TOKEN', 'no-ws@windmill.dev', 'no ws token', false); +INSERT INTO token(token_hash, token_prefix, token, email, label, super_admin) +VALUES (encode(sha256('NO_WS_TOKEN'::bytea), 'hex'), 'NO_WS_TOKE', 'NO_WS_TOKEN', 'no-ws@windmill.dev', 'no ws token', false); -- Script that returns WM_END_USER_EMAIL (public via extra_perms) INSERT INTO script (workspace_id, created_by, content, schema, summary, description, path, hash, language, lock, kind, extra_perms) diff --git a/backend/tests/fixtures/permissions_test.sql b/backend/tests/fixtures/permissions_test.sql index a9006f5233144..66f406c08cac6 100644 --- a/backend/tests/fixtures/permissions_test.sql +++ b/backend/tests/fixtures/permissions_test.sql @@ -39,13 +39,13 @@ ON CONFLICT (email) DO NOTHING; -- Tokens associated with emails (workspace-scoped) -- The auth system will look up the user by email in the usr table -- Note: tokens must be at least 10 characters (TOKEN_PREFIX_LEN) -INSERT INTO token (token, email, label, super_admin, owner, workspace_id) +INSERT INTO token (token_hash, token_prefix, token, email, label, super_admin, owner, workspace_id) VALUES - ('ADMIN_TOKEN_TEST', 'admin@windmill.dev', 'Admin token', false, 'u/admin', 'test-workspace'), - ('ALICE_TOKEN_TEST', 'alice@windmill.dev', 'Alice token', false, 'u/alice', 'test-workspace'), - ('BOB_TOKEN_TEST12', 'bob@windmill.dev', 'Bob token', false, 'u/bob', 'test-workspace'), - ('CHARLIE_TOKEN_01', 'charlie@windmill.dev', 'Charlie token', false, 'u/charlie', 'test-workspace'), - 
('OPERATOR_TOKEN_1', 'operator@windmill.dev', 'Operator token', false, 'u/operator', 'test-workspace'); + (encode(sha256('ADMIN_TOKEN_TEST'::bytea), 'hex'), 'ADMIN_TOKE', 'ADMIN_TOKEN_TEST', 'admin@windmill.dev', 'Admin token', false, 'u/admin', 'test-workspace'), + (encode(sha256('ALICE_TOKEN_TEST'::bytea), 'hex'), 'ALICE_TOKE', 'ALICE_TOKEN_TEST', 'alice@windmill.dev', 'Alice token', false, 'u/alice', 'test-workspace'), + (encode(sha256('BOB_TOKEN_TEST12'::bytea), 'hex'), 'BOB_TOKEN_', 'BOB_TOKEN_TEST12', 'bob@windmill.dev', 'Bob token', false, 'u/bob', 'test-workspace'), + (encode(sha256('CHARLIE_TOKEN_01'::bytea), 'hex'), 'CHARLIE_TO', 'CHARLIE_TOKEN_01', 'charlie@windmill.dev', 'Charlie token', false, 'u/charlie', 'test-workspace'), + (encode(sha256('OPERATOR_TOKEN_1'::bytea), 'hex'), 'OPERATOR_T', 'OPERATOR_TOKEN_1', 'operator@windmill.dev', 'Operator token', false, 'u/operator', 'test-workspace'); -- ============================================ -- GROUPS diff --git a/backend/tests/fixtures/preserve_on_behalf_of.sql b/backend/tests/fixtures/preserve_on_behalf_of.sql index 514c554960655..7467843e80a49 100644 --- a/backend/tests/fixtures/preserve_on_behalf_of.sql +++ b/backend/tests/fixtures/preserve_on_behalf_of.sql @@ -65,14 +65,20 @@ INSERT INTO usr_to_group(workspace_id, group_, usr) VALUES ('test-workspace', 'wm_deployers', 'deployer-user') ON CONFLICT DO NOTHING; --- Tokens for all users -INSERT INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN', 'test@windmill.dev', 'test token', true) +-- Tokens for all users (token_hash = sha256 hex, token_prefix = first 10 chars) +-- NOTE: plaintext `token` column is included for backward compat during transition. +-- Remove it once the `token` column is dropped from the schema. 
+INSERT INTO token(token_hash, token_prefix, token, email, label, super_admin) +VALUES (encode(sha256('SECRET_TOKEN'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN', 'test@windmill.dev', 'test token', true) ON CONFLICT DO NOTHING; -INSERT INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false) +INSERT INTO token(token_hash, token_prefix, token, email, label, super_admin) +VALUES (encode(sha256('SECRET_TOKEN_2'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false) ON CONFLICT DO NOTHING; -INSERT INTO token(token, email, label, super_admin) VALUES ('DEPLOYER_TOKEN', 'deployer@windmill.dev', 'deployer token', false) +INSERT INTO token(token_hash, token_prefix, token, email, label, super_admin) +VALUES (encode(sha256('DEPLOYER_TOKEN'::bytea), 'hex'), 'DEPLOYER_T', 'DEPLOYER_TOKEN', 'deployer@windmill.dev', 'deployer token', false) ON CONFLICT DO NOTHING; -INSERT INTO token(token, email, label, super_admin) VALUES ('ORIGINAL_TOKEN', 'original@windmill.dev', 'original token', false) +INSERT INTO token(token_hash, token_prefix, token, email, label, super_admin) +VALUES (encode(sha256('ORIGINAL_TOKEN'::bytea), 'hex'), 'ORIGINAL_T', 'ORIGINAL_TOKEN', 'original@windmill.dev', 'original token', false) ON CONFLICT DO NOTHING; GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_admin; diff --git a/backend/windmill-api-auth/src/auth.rs b/backend/windmill-api-auth/src/auth.rs index af212dc315ccd..88c3b0cf0b594 100644 --- a/backend/windmill-api-auth/src/auth.rs +++ b/backend/windmill-api-auth/src/auth.rs @@ -26,7 +26,9 @@ use tokio::sync::RwLock; use windmill_common::DB; use windmill_common::{ - auth::{get_folders_for_user, get_groups_for_user, JWTAuthClaims, TOKEN_PREFIX_LEN}, + auth::{ + get_folders_for_user, get_groups_for_user, hash_token, safe_token_prefix, JWTAuthClaims, + }, error::{Error, JsonResult}, jwt, users::{COOKIE_NAME, SUPERADMIN_SECRET_EMAIL}, @@ -42,13 +44,14 @@ 
lazy_static::lazy_static! { /// Get email from a valid token, with caching. /// Used for WM_END_USER_EMAIL when user is authenticated but not a workspace member. async fn get_email_from_token(db: &DB, token: &str) -> Option { - if let Some(cached) = TOKEN_EMAIL_CACHE.get(token) { + let t_hash = hash_token(token); + if let Some(cached) = TOKEN_EMAIL_CACHE.get(&t_hash) { return cached; } let email = sqlx::query_scalar!( - "SELECT email FROM token WHERE token = $1 AND (expiration > NOW() OR expiration IS NULL)", - token + "SELECT email FROM token WHERE token_hash = $1 AND (expiration > NOW() OR expiration IS NULL)", + t_hash ) .fetch_optional(db) .await @@ -56,7 +59,7 @@ async fn get_email_from_token(db: &DB, token: &str) -> Option { .flatten() .flatten(); // email column is nullable, so we get Option> - TOKEN_EMAIL_CACHE.insert(token.to_string(), email.clone()); + TOKEN_EMAIL_CACHE.insert(t_hash, email.clone()); email } @@ -75,13 +78,15 @@ pub async fn get_end_user_email( } None } -// Global function to invalidate a specific token from cache -pub fn invalidate_token_from_cache(token: &str) { - // Remove all cache entries for this token (across all workspaces) - AUTH_CACHE.retain(|(_workspace_id, cached_token), _cached_value| cached_token != token); +// Global function to invalidate tokens from cache by prefix +pub fn invalidate_token_from_cache(token_prefix: &str) { + // Remove all cache entries whose raw token starts with this prefix (across all workspaces) + AUTH_CACHE.retain(|(_workspace_id, cached_token), _cached_value| { + !cached_token.starts_with(token_prefix) + }); tracing::info!( - "Invalidated token from auth cache: {}...", - &token[..token.len().min(8)] + "Invalidated token(s) from auth cache with prefix: {}...", + &token_prefix[..token_prefix.len().min(8)] ); } @@ -211,13 +216,14 @@ impl AuthCache { } } _ => { + let t_hash = hash_token(token); let user_o = sqlx::query!( "UPDATE token SET last_used_at = now() WHERE - token = $1 + token_hash = $1 AND 
(expiration > NOW() OR expiration IS NULL) AND (workspace_id IS NULL OR workspace_id = $2) RETURNING owner, email, super_admin, scopes, label", - token, + t_hash, w_id.as_ref(), ) .map(|x| (x.owner, x.email, x.super_admin, x.scopes, x.label)) @@ -275,7 +281,7 @@ impl AuthCache { scopes: None, username_override, token_prefix: Some( - token[0..TOKEN_PREFIX_LEN].to_string(), + safe_token_prefix(token), ), }) } else { @@ -299,7 +305,7 @@ impl AuthCache { scopes: None, username_override, token_prefix: Some( - token[0..TOKEN_PREFIX_LEN].to_string(), + safe_token_prefix(token), ), }) } @@ -315,7 +321,7 @@ impl AuthCache { folders, scopes: None, username_override, - token_prefix: Some(token[0..TOKEN_PREFIX_LEN].to_string()), + token_prefix: Some(safe_token_prefix(token)), }) } } @@ -364,7 +370,7 @@ impl AuthCache { scopes, username_override, token_prefix: Some( - token[0..TOKEN_PREFIX_LEN].to_string(), + safe_token_prefix(token), ), }) } @@ -378,7 +384,7 @@ impl AuthCache { scopes, username_override, token_prefix: Some( - token[0..TOKEN_PREFIX_LEN].to_string(), + safe_token_prefix(token), ), }), None => None, @@ -393,7 +399,7 @@ impl AuthCache { folders: Vec::new(), scopes, username_override, - token_prefix: Some(token[0..TOKEN_PREFIX_LEN].to_string()), + token_prefix: Some(safe_token_prefix(token)), }) } } @@ -427,7 +433,7 @@ impl AuthCache { folders: Vec::new(), scopes: None, username_override: None, - token_prefix: Some(token[0..TOKEN_PREFIX_LEN].to_string()), + token_prefix: Some(safe_token_prefix(token)), }; Some(OptJobAuthed { authed, job_id: None }) } else { @@ -717,7 +723,7 @@ fn username_override_from_label(label: Option) -> Option { #[derive(FromRow, Serialize)] pub struct TruncatedTokenWithEmail { pub label: Option, - pub token_prefix: Option, + pub token_prefix: String, pub expiration: Option>, pub created_at: chrono::DateTime, pub last_used_at: chrono::DateTime, @@ -736,7 +742,7 @@ pub async fn list_tokens_internal( TruncatedTokenWithEmail, r#" SELECT label, - 
concat(substring(token for 10)) AS token_prefix, + token_prefix, expiration, created_at, last_used_at, @@ -759,7 +765,7 @@ pub async fn list_tokens_internal( TruncatedTokenWithEmail, r#" SELECT label, - concat(substring(token for 10)) AS token_prefix, + token_prefix, expiration, created_at, last_used_at, diff --git a/backend/windmill-api-auth/src/lib.rs b/backend/windmill-api-auth/src/lib.rs index 557ce7f70674e..aceef77e01418 100644 --- a/backend/windmill-api-auth/src/lib.rs +++ b/backend/windmill-api-auth/src/lib.rs @@ -18,7 +18,10 @@ use http::request::Parts; use windmill_audit::audit_oss::AuditAuthorable; use windmill_common::{ - auth::{fetch_authed_from_permissioned_as, is_devops_email, is_super_admin_email}, + auth::{ + fetch_authed_from_permissioned_as, hash_token, is_devops_email, is_super_admin_email, + TOKEN_PREFIX_LEN, + }, db::{Authable, Authed, AuthedRef}, error::{self, Error, Result}, users::username_to_permissioned_as, @@ -511,9 +514,20 @@ pub async fn create_token_internal( ) -> Result { use tracing::Instrument; use windmill_audit::{audit_oss::audit_log, ActionKind}; - use windmill_common::{utils::rd_string, worker::CLOUD_HOSTED}; + use windmill_common::{ + min_version::MIN_VERSION_SUPPORTS_TOKEN_HASH, utils::rd_string, worker::CLOUD_HOSTED, + }; let token = rd_string(32); + let t_hash = hash_token(&token); + let t_prefix = token.get(..TOKEN_PREFIX_LEN).unwrap_or(&token); + + // Write plaintext token column until all workers support hash-based lookup + let plaintext: Option<&str> = if MIN_VERSION_SUPPORTS_TOKEN_HASH.met().await { + None + } else { + Some(&token) + }; let is_super_admin = sqlx::query_scalar!( "SELECT super_admin FROM password WHERE email = $1", @@ -536,12 +550,14 @@ pub async fn create_token_internal( } let rows = sqlx::query!( "INSERT INTO token - (token, email, label, expiration, super_admin, scopes, workspace_id) - SELECT $1, $2, $3, $4, $5, $6, $7 - WHERE $7::varchar IS NULL OR NOT EXISTS( - SELECT 1 FROM workspace WHERE id = $7 
AND deleted = true + (token_hash, token_prefix, token, email, label, expiration, super_admin, scopes, workspace_id) + SELECT $1, $2, $3, $4, $5, $6, $7, $8, $9 + WHERE $9::varchar IS NULL OR NOT EXISTS( + SELECT 1 FROM workspace WHERE id = $9 AND deleted = true )", - token, + t_hash, + t_prefix, + plaintext as Option<&str>, authed.email, token_config.label, token_config.expiration, @@ -559,7 +575,7 @@ pub async fn create_token_internal( register_token_expiry_notification( &mut *tx, - &token, + &t_hash, token_config.label.as_deref(), token_config.expiration, ) @@ -571,7 +587,7 @@ pub async fn create_token_internal( "users.token.create", ActionKind::Create, &"global", - Some(&token[0..10]), + Some(t_prefix), None, ) .instrument(tracing::info_span!("token", email = &authed.email)) @@ -581,12 +597,14 @@ pub async fn create_token_internal( } /// Insert a pending expiry notification row for user tokens that have an expiration. +/// Stores the token_hash so the join in check_expiring_tokens works even when +/// the plaintext token column is NULL (after hash migration). 
/// When updating this filter, also update: /// - `is_user_token` in src/monitor.rs /// - `isUserToken` in frontend/src/lib/components/settings/TokensTable.svelte pub async fn register_token_expiry_notification( tx: &mut sqlx::PgConnection, - token: &str, + token_hash: &str, label: Option<&str>, expiration: Option>, ) { @@ -602,8 +620,8 @@ pub async fn register_token_expiry_notification( return; } if let Err(e) = sqlx::query!( - "INSERT INTO token_expiry_notification (token, expiration) VALUES ($1, $2) ON CONFLICT DO NOTHING", - token, + "INSERT INTO token_expiry_notification (token_hash, expiration) VALUES ($1, $2) ON CONFLICT DO NOTHING", + token_hash, expiration, ) .execute(&mut *tx) diff --git a/backend/windmill-api-integration-tests/tests/fixtures/base.sql b/backend/windmill-api-integration-tests/tests/fixtures/base.sql index 7db9918fba0c1..ce5df2640eb5d 100644 --- a/backend/windmill-api-integration-tests/tests/fixtures/base.sql +++ b/backend/windmill-api-integration-tests/tests/fixtures/base.sql @@ -33,9 +33,11 @@ INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES ('test-workspace', 'test3@windmill.dev', 'test-user-3', false, 'User'); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN', 'test@windmill.dev', 'test token', true); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); +-- NOTE: plaintext `token` column is included for backward compat during transition. +-- Remove it once the `token` column is dropped from the schema. 
+insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN', 'test@windmill.dev', 'test token', true); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN_2'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN_3'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_admin; GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_user; diff --git a/backend/windmill-api-integration-tests/tests/fixtures/permissions_test.sql b/backend/windmill-api-integration-tests/tests/fixtures/permissions_test.sql index a9006f5233144..66f406c08cac6 100644 --- a/backend/windmill-api-integration-tests/tests/fixtures/permissions_test.sql +++ b/backend/windmill-api-integration-tests/tests/fixtures/permissions_test.sql @@ -39,13 +39,13 @@ ON CONFLICT (email) DO NOTHING; -- Tokens associated with emails (workspace-scoped) -- The auth system will look up the user by email in the usr table -- Note: tokens must be at least 10 characters (TOKEN_PREFIX_LEN) -INSERT INTO token (token, email, label, super_admin, owner, workspace_id) +INSERT INTO token (token_hash, token_prefix, token, email, label, super_admin, owner, workspace_id) VALUES - ('ADMIN_TOKEN_TEST', 'admin@windmill.dev', 'Admin token', false, 'u/admin', 'test-workspace'), - ('ALICE_TOKEN_TEST', 'alice@windmill.dev', 'Alice token', false, 'u/alice', 'test-workspace'), - ('BOB_TOKEN_TEST12', 'bob@windmill.dev', 'Bob token', false, 'u/bob', 'test-workspace'), - ('CHARLIE_TOKEN_01', 'charlie@windmill.dev', 'Charlie token', false, 'u/charlie', 'test-workspace'), - ('OPERATOR_TOKEN_1', 'operator@windmill.dev', 'Operator token', 
false, 'u/operator', 'test-workspace'); + (encode(sha256('ADMIN_TOKEN_TEST'::bytea), 'hex'), 'ADMIN_TOKE', 'ADMIN_TOKEN_TEST', 'admin@windmill.dev', 'Admin token', false, 'u/admin', 'test-workspace'), + (encode(sha256('ALICE_TOKEN_TEST'::bytea), 'hex'), 'ALICE_TOKE', 'ALICE_TOKEN_TEST', 'alice@windmill.dev', 'Alice token', false, 'u/alice', 'test-workspace'), + (encode(sha256('BOB_TOKEN_TEST12'::bytea), 'hex'), 'BOB_TOKEN_', 'BOB_TOKEN_TEST12', 'bob@windmill.dev', 'Bob token', false, 'u/bob', 'test-workspace'), + (encode(sha256('CHARLIE_TOKEN_01'::bytea), 'hex'), 'CHARLIE_TO', 'CHARLIE_TOKEN_01', 'charlie@windmill.dev', 'Charlie token', false, 'u/charlie', 'test-workspace'), + (encode(sha256('OPERATOR_TOKEN_1'::bytea), 'hex'), 'OPERATOR_T', 'OPERATOR_TOKEN_1', 'operator@windmill.dev', 'Operator token', false, 'u/operator', 'test-workspace'); -- ============================================ -- GROUPS diff --git a/backend/windmill-api-integration-tests/tests/permissions.rs b/backend/windmill-api-integration-tests/tests/permissions.rs index 4342c612353cb..c333cf924b2a6 100644 --- a/backend/windmill-api-integration-tests/tests/permissions.rs +++ b/backend/windmill-api-integration-tests/tests/permissions.rs @@ -517,9 +517,14 @@ async fn test_group_permission_inheritance(db: Pool) -> anyhow::Result // for (workspace_id, token) tuples. Since we can't easily clear it from tests, // we use a different token or wait for cache expiry. For this test, we create // a new token for Charlie. 
+ let charlie_token = "CHARLIE_TOKEN_NEW"; + let charlie_token_hash = windmill_common::utils::calculate_hash(charlie_token); + let charlie_token_prefix = &charlie_token[..10.min(charlie_token.len())]; sqlx::query!( - "INSERT INTO token (token, email, label, super_admin, owner, workspace_id) - VALUES ('CHARLIE_TOKEN_NEW', 'charlie@windmill.dev', 'Charlie new token', false, 'u/charlie', 'test-workspace')" + "INSERT INTO token (token_hash, token_prefix, email, label, super_admin, owner, workspace_id) + VALUES ($1, $2, 'charlie@windmill.dev', 'Charlie new token', false, 'u/charlie', 'test-workspace')", + charlie_token_hash, + charlie_token_prefix, ) .execute(&db) .await?; @@ -563,28 +568,100 @@ async fn test_all_item_types_permissions(db: Pool) -> anyhow::Result<( let bob_client = create_client_for_user(port, "BOB_TOKEN_TEST12").await; // Test Scripts - uses /scripts/get/p/{path} - assert!(can_read(&alice_client, &format!("{base_url}/w/test-workspace/scripts/get/p/u/alice/my_script")).await); - assert!(!can_read(&bob_client, &format!("{base_url}/w/test-workspace/scripts/get/p/u/alice/my_script")).await); + assert!( + can_read( + &alice_client, + &format!("{base_url}/w/test-workspace/scripts/get/p/u/alice/my_script") + ) + .await + ); + assert!( + !can_read( + &bob_client, + &format!("{base_url}/w/test-workspace/scripts/get/p/u/alice/my_script") + ) + .await + ); // Test Flows - uses /flows/get/{path} (no /p/) - assert!(can_read(&alice_client, &format!("{base_url}/w/test-workspace/flows/get/u/alice/my_flow")).await); - assert!(!can_read(&bob_client, &format!("{base_url}/w/test-workspace/flows/get/u/alice/my_flow")).await); + assert!( + can_read( + &alice_client, + &format!("{base_url}/w/test-workspace/flows/get/u/alice/my_flow") + ) + .await + ); + assert!( + !can_read( + &bob_client, + &format!("{base_url}/w/test-workspace/flows/get/u/alice/my_flow") + ) + .await + ); // Test Resources - uses /resources/get/{path} (no /p/) - assert!(can_read(&alice_client, 
&format!("{base_url}/w/test-workspace/resources/get/u/alice/my_resource")).await); - assert!(!can_read(&bob_client, &format!("{base_url}/w/test-workspace/resources/get/u/alice/my_resource")).await); + assert!( + can_read( + &alice_client, + &format!("{base_url}/w/test-workspace/resources/get/u/alice/my_resource") + ) + .await + ); + assert!( + !can_read( + &bob_client, + &format!("{base_url}/w/test-workspace/resources/get/u/alice/my_resource") + ) + .await + ); // Test Variables - uses /variables/get/{path} (no /p/) - assert!(can_read(&alice_client, &format!("{base_url}/w/test-workspace/variables/get/u/alice/my_variable")).await); - assert!(!can_read(&bob_client, &format!("{base_url}/w/test-workspace/variables/get/u/alice/my_variable")).await); + assert!( + can_read( + &alice_client, + &format!("{base_url}/w/test-workspace/variables/get/u/alice/my_variable") + ) + .await + ); + assert!( + !can_read( + &bob_client, + &format!("{base_url}/w/test-workspace/variables/get/u/alice/my_variable") + ) + .await + ); // Test Schedules - uses /schedules/get/{path} (no /p/) - assert!(can_read(&alice_client, &format!("{base_url}/w/test-workspace/schedules/get/u/alice/my_schedule")).await); - assert!(!can_read(&bob_client, &format!("{base_url}/w/test-workspace/schedules/get/u/alice/my_schedule")).await); + assert!( + can_read( + &alice_client, + &format!("{base_url}/w/test-workspace/schedules/get/u/alice/my_schedule") + ) + .await + ); + assert!( + !can_read( + &bob_client, + &format!("{base_url}/w/test-workspace/schedules/get/u/alice/my_schedule") + ) + .await + ); // Test Apps - uses /apps/get/p/{path} - assert!(can_read(&alice_client, &format!("{base_url}/w/test-workspace/apps/get/p/u/alice/my_app")).await); - assert!(!can_read(&bob_client, &format!("{base_url}/w/test-workspace/apps/get/p/u/alice/my_app")).await); + assert!( + can_read( + &alice_client, + &format!("{base_url}/w/test-workspace/apps/get/p/u/alice/my_app") + ) + .await + ); + assert!( + !can_read( + &bob_client, 
+ &format!("{base_url}/w/test-workspace/apps/get/p/u/alice/my_app") + ) + .await + ); Ok(()) } @@ -747,11 +824,9 @@ async fn test_operator_cannot_create_update(db: Pool) -> anyhow::Resul .await?; // Update app versions - sqlx::query!( - "UPDATE app SET versions = ARRAY[3001::bigint] WHERE id = 3001" - ) - .execute(&db) - .await?; + sqlx::query!("UPDATE app SET versions = ARRAY[3001::bigint] WHERE id = 3001") + .execute(&db) + .await?; let update_app = json!({ "path": "u/operator/existing_app", diff --git a/backend/windmill-api-integration-tests/tests/token_hash.rs b/backend/windmill-api-integration-tests/tests/token_hash.rs new file mode 100644 index 0000000000000..aa7dd22286a9e --- /dev/null +++ b/backend/windmill-api-integration-tests/tests/token_hash.rs @@ -0,0 +1,399 @@ +//! Tests for the token hash migration. +//! +//! Verifies that: +//! - Rust hash_token() matches PostgreSQL's encode(sha256(...),'hex') +//! - Newly created tokens can authenticate immediately +//! - Token list/delete-by-prefix works with the new token_prefix column +//! - Logout invalidates tokens via hash-based deletion +//! - Backward compat: plaintext column is populated when old workers exist +//! - rotate_webhook_token produces valid tokens and defers old token deletion + +use serde_json::json; +use sqlx::{Pool, Postgres}; +use windmill_common::auth::{hash_token, TOKEN_PREFIX_LEN}; +use windmill_test_utils::*; + +fn client() -> reqwest::Client { + reqwest::Client::new() +} + +fn authed(builder: reqwest::RequestBuilder) -> reqwest::RequestBuilder { + builder.header("Authorization", "Bearer SECRET_TOKEN") +} + +fn authed_with(builder: reqwest::RequestBuilder, token: &str) -> reqwest::RequestBuilder { + builder.header("Authorization", format!("Bearer {}", token)) +} + +/// Test 1: Verify that Rust's hash_token() produces the same hash as PostgreSQL's +/// encode(sha256(token::bytea), 'hex'). This is the foundational invariant. 
+#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_hash_consistency(db: Pool) -> anyhow::Result<()> { + initialize_tracing().await; + + // Compute hash in Rust + let rust_hash = hash_token("SECRET_TOKEN"); + + // Compute hash in PostgreSQL + let pg_hash: String = + sqlx::query_scalar!("SELECT encode(sha256('SECRET_TOKEN'::bytea), 'hex') AS hash") + .fetch_one(&db) + .await? + .unwrap(); + + assert_eq!( + rust_hash, pg_hash, + "Rust hash_token() must match PostgreSQL sha256()" + ); + + // Also verify it matches what's stored in the fixture + let stored_hash: String = sqlx::query_scalar!( + "SELECT token_hash FROM token WHERE email = 'test@windmill.dev' AND label = 'test token'" + ) + .fetch_one(&db) + .await?; + + assert_eq!( + rust_hash, stored_hash, + "hash_token() must match the fixture's pre-computed hash" + ); + + Ok(()) +} + +/// Test 2: Create a token via API, then immediately use it to authenticate. +/// Verifies create_token_internal stores the hash correctly and auth lookups work. 
+#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_create_token_and_auth(db: Pool) -> anyhow::Result<()> { + initialize_tracing().await; + let server = ApiServer::start(db.clone()).await?; + let port = server.addr.port(); + let base = format!("http://localhost:{port}/api/users"); + + // Create a new token + let resp = authed(client().post(format!("{base}/tokens/create"))) + .json(&json!({"label": "test-hash-token"})) + .send() + .await?; + assert_eq!(resp.status(), 201); + let new_token = resp.text().await?; + assert!(!new_token.is_empty()); + + // Use the new token to call whoami + let resp = authed_with(client().get(format!("{base}/whoami")), &new_token) + .send() + .await?; + assert_eq!(resp.status(), 200, "newly created token must authenticate"); + let body = resp.json::().await?; + assert_eq!(body["email"], "test@windmill.dev"); + + // Verify the hash is stored correctly in DB + let expected_hash = hash_token(&new_token); + let db_hash: Option = sqlx::query_scalar!( + "SELECT token_hash FROM token WHERE token_hash = $1", + expected_hash + ) + .fetch_optional(&db) + .await?; + assert!(db_hash.is_some(), "token_hash must be stored in DB"); + + Ok(()) +} + +/// Test 3: Create a token, list tokens (verify prefix), delete by prefix, confirm invalid. +/// Covers the change from WHERE token LIKE to WHERE token_prefix = $1. 
+#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_token_list_and_delete_by_prefix(db: Pool) -> anyhow::Result<()> { + initialize_tracing().await; + let server = ApiServer::start(db.clone()).await?; + let port = server.addr.port(); + let base = format!("http://localhost:{port}/api/users"); + + // Create a token + let resp = authed(client().post(format!("{base}/tokens/create"))) + .json(&json!({"label": "prefix-test-token"})) + .send() + .await?; + assert_eq!(resp.status(), 201); + let new_token = resp.text().await?; + let prefix = &new_token[..TOKEN_PREFIX_LEN]; + + // List tokens and find our token by prefix + let resp = authed(client().get(format!("{base}/tokens/list"))) + .send() + .await?; + assert_eq!(resp.status(), 200); + let tokens = resp.json::>().await?; + let found = tokens + .iter() + .any(|t| t["token_prefix"].as_str() == Some(prefix)); + assert!(found, "token with prefix {prefix} must appear in list"); + + // Verify the new token works + let resp = authed_with(client().get(format!("{base}/whoami")), &new_token) + .send() + .await?; + assert_eq!(resp.status(), 200); + + // Delete by prefix + let resp = authed(client().delete(format!("{base}/tokens/delete/{prefix}"))) + .send() + .await?; + assert_eq!(resp.status(), 200, "delete token: {}", resp.text().await?); + + // Confirm the token is gone from the DB (auth cache may still serve 200 briefly) + let token_hash = hash_token(&new_token); + let deleted: bool = sqlx::query_scalar!( + "SELECT EXISTS(SELECT 1 FROM token WHERE token_hash = $1) AS exists", + token_hash + ) + .fetch_one(&db) + .await? + .unwrap_or(true); + assert!( + !deleted, + "token must be deleted from DB after delete-by-prefix" + ); + + Ok(()) +} + +/// Test 4: Logout invalidates a token via hash-based deletion. 
+#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_logout_invalidates_token(db: Pool) -> anyhow::Result<()> { + initialize_tracing().await; + let server = ApiServer::start(db.clone()).await?; + let port = server.addr.port(); + let base = format!("http://localhost:{port}/api/users"); + let auth_base = format!("http://localhost:{port}/api/auth"); + + // Create a fresh token (don't burn the fixture token) + let resp = authed(client().post(format!("{base}/tokens/create"))) + .json(&json!({"label": "logout-test-token"})) + .send() + .await?; + assert_eq!(resp.status(), 201); + let token = resp.text().await?; + + // Verify it works + let resp = authed_with(client().get(format!("{base}/whoami")), &token) + .send() + .await?; + assert_eq!(resp.status(), 200); + + // Logout with the token + let resp = authed_with(client().post(format!("{auth_base}/logout")), &token) + .send() + .await?; + assert!( + resp.status() == 200 || resp.status() == 303, + "logout: unexpected status {}", + resp.status() + ); + + // Confirm the token is gone from the DB (auth cache may still serve 200 briefly) + let token_hash = hash_token(&token); + let exists: bool = sqlx::query_scalar!( + "SELECT EXISTS(SELECT 1 FROM token WHERE token_hash = $1) AS exists", + token_hash + ) + .fetch_one(&db) + .await? + .unwrap_or(true); + assert!(!exists, "token must be deleted from DB after logout"); + + Ok(()) +} + +/// Test 5: Backward compatibility — plaintext column behavior based on MIN_VERSION. +/// When old workers exist (MIN_VERSION < 1.650.0), plaintext must be written so +/// old workers running WHERE token = $1 can still authenticate. 
+#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_plaintext_backward_compat(db: Pool) -> anyhow::Result<()> { + initialize_tracing().await; + + use windmill_common::min_version::{MIN_VERSION, MIN_VERSION_SUPPORTS_TOKEN_HASH}; + + let server = ApiServer::start(db.clone()).await?; + let port = server.addr.port(); + let base = format!("http://localhost:{port}/api/users"); + + // --- Phase 1: Simulate old workers present (version < 1.650.0) --- + // Set MIN_VERSION to one minor below the token hash feature version + let mut old_version = MIN_VERSION_SUPPORTS_TOKEN_HASH.version().clone(); + old_version.minor -= 1; + *MIN_VERSION.write().await = old_version; + + let resp = authed(client().post(format!("{base}/tokens/create"))) + .json(&json!({"label": "old-worker-compat-token"})) + .send() + .await?; + assert_eq!(resp.status(), 201); + let old_compat_token = resp.text().await?; + let old_compat_hash = hash_token(&old_compat_token); + + // Plaintext should be stored (for old workers) + let plaintext: Option = sqlx::query_scalar!( + "SELECT token FROM token WHERE token_hash = $1", + old_compat_hash + ) + .fetch_one(&db) + .await?; + assert!( + plaintext.is_some(), + "plaintext must be stored when old workers exist" + ); + assert_eq!(plaintext.unwrap(), old_compat_token); + + // Old-style query (what old workers run) must find the token + let old_style_email: Option = sqlx::query_scalar!( + "SELECT email FROM token WHERE token = $1 AND (expiration > NOW() OR expiration IS NULL)", + &old_compat_token + ) + .fetch_optional(&db) + .await? + .flatten(); + assert_eq!( + old_style_email.as_deref(), + Some("test@windmill.dev"), + "old-style WHERE token = $1 must find the token" + ); + + // New-style query must also work + let new_style_email: Option = sqlx::query_scalar!( + "SELECT email FROM token WHERE token_hash = $1 AND (expiration > NOW() OR expiration IS NULL)", + old_compat_hash + ) + .fetch_optional(&db) + .await? 
+ .flatten(); + assert_eq!( + new_style_email.as_deref(), + Some("test@windmill.dev"), + "new-style WHERE token_hash = $1 must also work" + ); + + // --- Phase 2: All workers upgraded (version >= 1.650.0) --- + *MIN_VERSION.write().await = MIN_VERSION_SUPPORTS_TOKEN_HASH.version().clone(); + + let resp = authed(client().post(format!("{base}/tokens/create"))) + .json(&json!({"label": "new-worker-token"})) + .send() + .await?; + assert_eq!(resp.status(), 201); + let new_token = resp.text().await?; + let new_hash = hash_token(&new_token); + + // Plaintext should NOT be stored + let plaintext: Option = + sqlx::query_scalar!("SELECT token FROM token WHERE token_hash = $1", new_hash) + .fetch_one(&db) + .await?; + assert!( + plaintext.is_none(), + "plaintext must be NULL when all workers support hash" + ); + + // Old-style query should NOT find this token + let old_style_result: Option = + sqlx::query_scalar!("SELECT email FROM token WHERE token = $1", &new_token) + .fetch_optional(&db) + .await? + .flatten(); + assert!( + old_style_result.is_none(), + "old-style query must not find token when plaintext is NULL" + ); + + // New-style query must still work + let resp = authed_with(client().get(format!("{base}/whoami")), &new_token) + .send() + .await?; + assert_eq!( + resp.status(), + 200, + "new token must authenticate via hash lookup" + ); + + Ok(()) +} + +/// Test 6: rotate_webhook_token creates a new token and keeps the old one alive. +/// Callers delete the old token after successfully updating the trigger. 
+#[sqlx::test(migrations = "../migrations", fixtures("base"))] +async fn test_rotate_webhook_token(db: Pool) -> anyhow::Result<()> { + initialize_tracing().await; + + use windmill_native_triggers::{delete_token_by_hash, rotate_webhook_token}; + + // Insert a token directly with known values + let original_token = "test-webhook-token-original-1234"; + let original_hash = hash_token(original_token); + let original_prefix = &original_token[..TOKEN_PREFIX_LEN]; + + sqlx::query!( + "INSERT INTO token (token_hash, token_prefix, token, email, label, super_admin) + VALUES ($1, $2, $3, 'test@windmill.dev', 'webhook-test', false)", + original_hash, + original_prefix, + original_token, + ) + .execute(&db) + .await?; + + // Rotate the token + let rotated = rotate_webhook_token(&db, &original_hash) + .await? + .expect("rotate must return Some for existing token"); + + // New token should be different + assert_ne!(rotated.new_token, original_token); + assert_eq!(rotated.old_token_hash, original_hash); + + // New token's hash should exist in DB + let new_hash = hash_token(&rotated.new_token); + let exists: bool = sqlx::query_scalar!( + "SELECT EXISTS(SELECT 1 FROM token WHERE token_hash = $1) AS exists", + new_hash + ) + .fetch_one(&db) + .await? + .unwrap_or(false); + assert!(exists, "new token hash must exist in DB after rotation"); + + // Old token should still exist (deletion deferred to caller) + let old_exists: bool = sqlx::query_scalar!( + "SELECT EXISTS(SELECT 1 FROM token WHERE token_hash = $1) AS exists", + original_hash + ) + .fetch_one(&db) + .await? 
+ .unwrap_or(false); + assert!( + old_exists, + "old token must still exist until caller deletes it" + ); + + // Caller deletes old token after successful trigger update + let deleted = delete_token_by_hash(&db, &rotated.old_token_hash).await?; + assert!(deleted, "old token must be deletable"); + + // Old token should now be gone + let old_gone: bool = sqlx::query_scalar!( + "SELECT EXISTS(SELECT 1 FROM token WHERE token_hash = $1) AS exists", + original_hash + ) + .fetch_one(&db) + .await? + .unwrap_or(true); + assert!(!old_gone, "old token must be gone after explicit deletion"); + + // Rotating a non-existent hash should return None + let result = rotate_webhook_token(&db, "nonexistent_hash").await?; + assert!( + result.is_none(), + "rotating a non-existent token must return None" + ); + + Ok(()) +} diff --git a/backend/windmill-api-users/src/users.rs b/backend/windmill-api-users/src/users.rs index f7409169a9281..b911739eaaa40 100644 --- a/backend/windmill-api-users/src/users.rs +++ b/backend/windmill-api-users/src/users.rs @@ -42,7 +42,7 @@ use tracing::Instrument; use windmill_audit::audit_oss::audit_log; use windmill_audit::ActionKind; use windmill_common::audit::AuditAuthor; -use windmill_common::auth::TOKEN_PREFIX_LEN; +use windmill_common::auth::{safe_token_prefix, TOKEN_PREFIX_LEN}; use windmill_common::global_settings::AUTOMATE_USERNAME_CREATION_SETTING; use windmill_common::oauth2::InstanceEvent; use windmill_common::users::truncate_token; @@ -235,7 +235,7 @@ pub struct EditLoginType { #[derive(FromRow, Serialize)] pub struct TruncatedToken { pub label: Option, - pub token_prefix: Option, + pub token_prefix: String, pub expiration: Option>, pub created_at: chrono::DateTime, pub last_used_at: chrono::DateTime, @@ -527,23 +527,28 @@ async fn logout( } cookies.remove(cookie); let mut tx = db.begin().await?; + let t_hash = windmill_common::auth::hash_token(&token); + let t_prefix = token.get(..TOKEN_PREFIX_LEN).unwrap_or(&token); let email = if 
*INVALIDATE_ALL_SESSIONS_ON_LOGOUT { sqlx::query_scalar!( "WITH email_lookup AS ( - SELECT email FROM token WHERE token = $1 + SELECT email FROM token WHERE token_hash = $1 ) DELETE FROM token WHERE email = (SELECT email FROM email_lookup) AND label = 'session' RETURNING email", - token + t_hash ) .fetch_optional(&mut *tx) .await? } else { - sqlx::query_scalar!("DELETE FROM token WHERE token = $1 RETURNING email", token) - .fetch_optional(&mut *tx) - .await? + sqlx::query_scalar!( + "DELETE FROM token WHERE token_hash = $1 RETURNING email", + t_hash + ) + .fetch_optional(&mut *tx) + .await? }; if let Some(email) = email { @@ -559,7 +564,7 @@ async fn logout( email: email.clone(), username: email, username_override: None, - token_prefix: Some(token[0..TOKEN_PREFIX_LEN].to_string()), + token_prefix: Some(t_prefix.to_string()), }, audit_message, ActionKind::Delete, @@ -1643,7 +1648,7 @@ async fn login( email: email.clone(), username: email.clone(), username_override: None, - token_prefix: Some(token[0..TOKEN_PREFIX_LEN].to_string()), + token_prefix: Some(safe_token_prefix(&token)), }; audit_log( @@ -1689,7 +1694,8 @@ async fn refresh_token( let mut tx = db.begin().await?; if let Some(thresh_s) = query.if_expiring_in_less_than_s { - let not_expired = sqlx::query_scalar!("SELECT true FROM token WHERE token = $1 and expiration IS NOT NULL and expiration > now() + $2::int * '1 sec'::interval", &token, thresh_s) + let t_hash = windmill_common::auth::hash_token(&token); + let not_expired = sqlx::query_scalar!("SELECT true FROM token WHERE token_hash = $1 and expiration IS NOT NULL and expiration > now() + $2::int * '1 sec'::interval", &t_hash, thresh_s) .fetch_optional(&db) .await? 
.flatten() @@ -1740,7 +1746,16 @@ pub async fn create_session_token<'c>( tx: &mut sqlx::Transaction<'c, sqlx::Postgres>, cookies: Cookies, ) -> Result { + use windmill_common::min_version::MIN_VERSION_SUPPORTS_TOKEN_HASH; + let token = rd_string(32); + let t_hash = windmill_common::auth::hash_token(&token); + let t_prefix = token.get(..TOKEN_PREFIX_LEN).unwrap_or(&token); + let plaintext: Option<&str> = if MIN_VERSION_SUPPORTS_TOKEN_HASH.met().await { + None + } else { + Some(&token) + }; if *INVALIDATE_OLD_SESSIONS { sqlx::query!( @@ -1756,7 +1771,7 @@ pub async fn create_session_token<'c>( email: email.to_string(), username: email.to_string(), username_override: None, - token_prefix: Some(token[0..TOKEN_PREFIX_LEN].to_string()), + token_prefix: Some(t_prefix.to_string()), }, "users.token.invalidate_old_sessions", ActionKind::Delete, @@ -1770,9 +1785,11 @@ pub async fn create_session_token<'c>( sqlx::query!( "INSERT INTO token - (token, email, label, expiration, super_admin) - VALUES ($1, $2, $3, now() + ($4 || ' seconds')::interval, $5)", - token, + (token_hash, token_prefix, token, email, label, expiration, super_admin) + VALUES ($1, $2, $3, $4, $5, now() + ($6 || ' seconds')::interval, $7)", + t_hash, + t_prefix, + plaintext as Option<&str>, email, "session", &MAX_SESSION_VALIDITY_SECONDS.to_string(), @@ -1817,7 +1834,16 @@ async fn impersonate( authed: ApiAuthed, Json(new_token): Json, ) -> Result<(StatusCode, String)> { + use windmill_common::min_version::MIN_VERSION_SUPPORTS_TOKEN_HASH; + let token = rd_string(32); + let t_hash = windmill_common::auth::hash_token(&token); + let t_prefix = token.get(..TOKEN_PREFIX_LEN).unwrap_or(&token); + let plaintext: Option<&str> = if MIN_VERSION_SUPPORTS_TOKEN_HASH.met().await { + None + } else { + Some(&token) + }; require_super_admin(&db, &authed.email).await?; if new_token.impersonate_email.is_none() { @@ -1839,9 +1865,11 @@ async fn impersonate( sqlx::query!( "INSERT INTO token - (token, email, label, expiration, 
super_admin) - VALUES ($1, $2, $3, $4, $5)", - token, + (token_hash, token_prefix, token, email, label, expiration, super_admin) + VALUES ($1, $2, $3, $4, $5, $6, $7)", + t_hash, + t_prefix, + plaintext as Option<&str>, impersonated, new_token.label, new_token.expiration, @@ -1852,7 +1880,7 @@ async fn impersonate( windmill_api_auth::register_token_expiry_notification( &mut *tx, - &token, + &t_hash, new_token.label.as_deref(), new_token.expiration, ) @@ -1864,7 +1892,7 @@ async fn impersonate( "users.impersonate", ActionKind::Delete, &"global", - Some(&token[0..10]), + Some(t_prefix), Some([("impersonated", &format!("{impersonated}")[..])].into()), ) .instrument(tracing::info_span!("token", email = &impersonated)) @@ -1888,7 +1916,7 @@ async fn list_tokens( let rows = if query.exclude_ephemeral.unwrap_or(false) { sqlx::query_as!( TruncatedToken, - "SELECT label, concat(substring(token for 10)) as token_prefix, expiration, created_at, \ + "SELECT label, token_prefix, expiration, created_at, \ last_used_at, scopes FROM token WHERE email = $1 AND (label != 'ephemeral-script' OR label IS NULL) ORDER BY created_at DESC LIMIT $2 OFFSET $3", email, @@ -1900,7 +1928,7 @@ async fn list_tokens( } else { sqlx::query_as!( TruncatedToken, - "SELECT label, concat(substring(token for 10)) as token_prefix, expiration, created_at, \ + "SELECT label, token_prefix, expiration, created_at, \ last_used_at, scopes FROM token WHERE email = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3", email, @@ -1923,8 +1951,8 @@ async fn delete_token( let tokens_deleted: Vec = sqlx::query_scalar( "DELETE FROM token WHERE email = $1 - AND token LIKE concat($2::text, '%') - RETURNING concat(substring(token for 10), '*****')", + AND token_prefix = $2 + RETURNING concat(token_prefix, '*****')", ) .bind(&authed.email) .bind(&token_prefix) diff --git a/backend/windmill-api-workspaces/src/workspaces.rs b/backend/windmill-api-workspaces/src/workspaces.rs index a85383e496488..2b3146a6fb213 100644 --- 
a/backend/windmill-api-workspaces/src/workspaces.rs +++ b/backend/windmill-api-workspaces/src/workspaces.rs @@ -3594,7 +3594,7 @@ pub(crate) async fn archive_workspace_impl( // Delete non-session tokens scoped to this workspace let deleted_tokens = sqlx::query_scalar!( - "DELETE FROM token WHERE workspace_id = $1 AND label IS DISTINCT FROM 'session' RETURNING token", + "DELETE FROM token WHERE workspace_id = $1 AND label IS DISTINCT FROM 'session' RETURNING token_prefix", w_id ) .fetch_all(&mut *tx) diff --git a/backend/windmill-api/src/mcp/oauth_server.rs b/backend/windmill-api/src/mcp/oauth_server.rs index 9a81e2efd83e3..ecfada2fcb526 100644 --- a/backend/windmill-api/src/mcp/oauth_server.rs +++ b/backend/windmill-api/src/mcp/oauth_server.rs @@ -10,7 +10,9 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use sqlx::FromRow; use windmill_common::{ + auth::{hash_token, TOKEN_PREFIX_LEN}, error::{Error, Result}, + min_version::MIN_VERSION_SUPPORTS_TOKEN_HASH, utils::rd_string, BASE_URL, DB, }; @@ -209,7 +211,7 @@ struct AuthorizationCode { struct RefreshTokenRow { id: i64, refresh_token: String, - access_token: String, + access_token_hash: String, client_id: String, user_email: String, workspace_id: String, @@ -383,16 +385,25 @@ async fn handle_authorization_code_grant( } let access_token = rd_string(32); + let access_token_hash = hash_token(&access_token); + let access_token_prefix = access_token.get(..TOKEN_PREFIX_LEN).unwrap_or(&access_token); + let plaintext: Option<&str> = if MIN_VERSION_SUPPORTS_TOKEN_HASH.met().await { + None + } else { + Some(&access_token) + }; let refresh_token = rd_string(32); let token_family = sqlx::types::Uuid::new_v4(); let scopes = auth_code.scopes; // Create access token (rejects archived workspaces inline) let rows = sqlx::query!( - "INSERT INTO token (token, email, label, expiration, scopes, workspace_id) - SELECT $1::varchar, $2::varchar, $3::varchar, now() + ($4 || ' seconds')::interval, $5::text[], $6::varchar 
- WHERE NOT EXISTS(SELECT 1 FROM workspace WHERE id = $6 AND deleted = true)", - access_token, + "INSERT INTO token (token_hash, token_prefix, token, email, label, expiration, scopes, workspace_id) + SELECT $1::varchar, $2::varchar, $3::varchar, $4::varchar, $5::varchar, now() + ($6 || ' seconds')::interval, $7::text[], $8::varchar + WHERE NOT EXISTS(SELECT 1 FROM workspace WHERE id = $8 AND deleted = true)", + access_token_hash, + access_token_prefix, + plaintext as Option<&str>, auth_code.user_email, format!("mcp-oauth-{}", auth_code.client_id), MCP_OAUTH_TOKEN_EXPIRATION_SECS.to_string(), @@ -411,13 +422,13 @@ async fn handle_authorization_code_grant( )); } - // Create refresh token + // Create refresh token — store the hash of the access token so we can delete it later let refresh_token_result = sqlx::query!( "INSERT INTO mcp_oauth_refresh_token - (refresh_token, access_token, client_id, user_email, workspace_id, scopes, token_family, expires_at) + (refresh_token, access_token_hash, client_id, user_email, workspace_id, scopes, token_family, expires_at) VALUES ($1, $2, $3, $4, $5, $6, $7, now() + ($8 || ' seconds')::interval)", refresh_token, - access_token, + access_token_hash, auth_code.client_id, auth_code.user_email, auth_code.workspace_id, @@ -469,7 +480,7 @@ async fn handle_refresh_token_grant( AND used_at IS NULL AND NOT revoked AND expires_at > now() - RETURNING id, refresh_token, access_token, client_id, user_email, workspace_id, + RETURNING id, refresh_token, access_token_hash, client_id, user_email, workspace_id, scopes, token_family, created_at, expires_at, used_at, revoked", refresh_token_value, req.client_id @@ -504,10 +515,13 @@ async fn handle_refresh_token_grant( } }; - // Delete old access token - if let Err(e) = sqlx::query!("DELETE FROM token WHERE token = $1", token_row.access_token) - .execute(db) - .await + // Delete old access token using the stored hash + if let Err(e) = sqlx::query!( + "DELETE FROM token WHERE token_hash = $1", + 
token_row.access_token_hash + ) + .execute(db) + .await { tracing::error!("Failed to delete old access token: {}", e); // Non-fatal, continue with token creation @@ -515,15 +529,24 @@ async fn handle_refresh_token_grant( // Generate new tokens let new_access_token = rd_string(32); + let new_access_token_hash = hash_token(&new_access_token); + let new_access_token_prefix = new_access_token.get(..TOKEN_PREFIX_LEN).unwrap_or(&new_access_token); + let new_plaintext: Option<&str> = if MIN_VERSION_SUPPORTS_TOKEN_HASH.met().await { + None + } else { + Some(&new_access_token) + }; let new_refresh_token = rd_string(32); let scopes = token_row.scopes; // Create new access token (rejects archived workspaces inline) let rows = sqlx::query!( - "INSERT INTO token (token, email, label, expiration, scopes, workspace_id) - SELECT $1::varchar, $2::varchar, $3::varchar, now() + ($4 || ' seconds')::interval, $5::text[], $6::varchar - WHERE NOT EXISTS(SELECT 1 FROM workspace WHERE id = $6 AND deleted = true)", - new_access_token, + "INSERT INTO token (token_hash, token_prefix, token, email, label, expiration, scopes, workspace_id) + SELECT $1::varchar, $2::varchar, $3::varchar, $4::varchar, $5::varchar, now() + ($6 || ' seconds')::interval, $7::text[], $8::varchar + WHERE NOT EXISTS(SELECT 1 FROM workspace WHERE id = $8 AND deleted = true)", + new_access_token_hash, + new_access_token_prefix, + new_plaintext as Option<&str>, token_row.user_email, format!("mcp-oauth-{}", token_row.client_id), MCP_OAUTH_TOKEN_EXPIRATION_SECS.to_string(), @@ -542,13 +565,13 @@ async fn handle_refresh_token_grant( )); } - // Create new refresh token (same token family for tracking) + // Create new refresh token (same token family for tracking) — store hash of access token if let Err(e) = sqlx::query!( "INSERT INTO mcp_oauth_refresh_token - (refresh_token, access_token, client_id, user_email, workspace_id, scopes, token_family, expires_at) + (refresh_token, access_token_hash, client_id, user_email, 
workspace_id, scopes, token_family, expires_at) VALUES ($1, $2, $3, $4, $5, $6, $7, now() + ($8 || ' seconds')::interval)", new_refresh_token, - new_access_token, + new_access_token_hash, token_row.client_id, token_row.user_email, token_row.workspace_id, diff --git a/backend/windmill-api/src/workspaces_export.rs b/backend/windmill-api/src/workspaces_export.rs index 70e676b5b4295..df1c83d6d58d9 100644 --- a/backend/windmill-api/src/workspaces_export.rs +++ b/backend/windmill-api/src/workspaces_export.rs @@ -834,7 +834,7 @@ pub(crate) async fn tarball_workspace( let trigger_str = &to_string_without_metadata( &trigger, false, - Some(vec!["webhook_token_prefix"]), + Some(vec!["webhook_token_hash"]), ) .unwrap(); archive diff --git a/backend/windmill-common/src/auth.rs b/backend/windmill-common/src/auth.rs index 7024029c52488..23604b6e3a117 100644 --- a/backend/windmill-common/src/auth.rs +++ b/backend/windmill-common/src/auth.rs @@ -18,6 +18,12 @@ use crate::{ DB, }; +/// Hash a raw token using SHA-256 (hex-encoded, 64 chars). +/// Used to store and look up tokens without keeping plaintext in the DB. +pub fn hash_token(token: &str) -> String { + crate::utils::calculate_hash(token) +} + #[derive(Debug)] pub struct IdToken { token: String, @@ -26,6 +32,15 @@ pub struct IdToken { pub const TOKEN_PREFIX_LEN: usize = 10; +/// Safely extract the token prefix (first TOKEN_PREFIX_LEN chars). +/// Returns the full token if it's shorter than TOKEN_PREFIX_LEN, preventing panics. +pub fn safe_token_prefix(token: &str) -> String { + token + .get(..TOKEN_PREFIX_LEN) + .unwrap_or(token) + .to_string() +} + lazy_static::lazy_static! 
{ // Cache for script hash permissions - (ApiAuthed hash, script_hash) -> permission result pub static ref HASH_PERMS_CACHE: PermsCache = PermsCache::new(); diff --git a/backend/windmill-common/src/min_version.rs b/backend/windmill-common/src/min_version.rs index fd1769dd55d05..11abecff9ffb0 100644 --- a/backend/windmill-common/src/min_version.rs +++ b/backend/windmill-common/src/min_version.rs @@ -5,6 +5,7 @@ use tokio::sync::RwLock; // ============ Feature Definitions ============ +pub const MIN_VERSION_SUPPORTS_TOKEN_HASH: VC = vc(1, 659, 0, "Token hash storage"); pub const MIN_VERSION_SUPPORTS_SYNC_JOBS_DEBOUNCING: VC = vc(1, 602, 0, "Sync jobs debouncing"); pub const MIN_VERSION_SUPPORTS_DEBOUNCING_V2: VC = vc(1, 597, 0, "Debouncing V2"); pub const MIN_VERSION_IS_AT_LEAST_1_595: VC = vc(1, 595, 0, "Flow status separate table"); diff --git a/backend/windmill-common/tests/fixtures/base.sql b/backend/windmill-common/tests/fixtures/base.sql index 7db9918fba0c1..412fa1029f936 100644 --- a/backend/windmill-common/tests/fixtures/base.sql +++ b/backend/windmill-common/tests/fixtures/base.sql @@ -33,9 +33,9 @@ INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES ('test-workspace', 'test3@windmill.dev', 'test-user-3', false, 'User'); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN', 'test@windmill.dev', 'test token', true); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN', 'test@windmill.dev', 'test token', true); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES 
(encode(sha256('SECRET_TOKEN_2'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN_3'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_admin; GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_user; diff --git a/backend/windmill-common/tests/notify_events.rs b/backend/windmill-common/tests/notify_events.rs index 36302b8e669c4..b9f639caea796 100644 --- a/backend/windmill-common/tests/notify_events.rs +++ b/backend/windmill-common/tests/notify_events.rs @@ -40,13 +40,20 @@ async fn count_events_for_channel(db: &Pool, channel: &str) -> i64 { #[sqlx::test(migrations = "../migrations", fixtures("base"))] async fn test_get_latest_event_id_returns_valid_id(db: Pool) { // Get current latest id - let latest_id = get_latest_event_id(&db).await.expect("Should get latest event id"); + let latest_id = get_latest_event_id(&db) + .await + .expect("Should get latest event id"); assert!(latest_id >= 0, "Latest id should be non-negative"); // Insert a new event and verify latest_id increases let new_id = insert_test_event(&db, "test_latest_id", "payload").await; - let new_latest_id = get_latest_event_id(&db).await.expect("Should get latest event id"); - assert!(new_latest_id >= new_id, "Latest id should be >= new event id"); + let new_latest_id = get_latest_event_id(&db) + .await + .expect("Should get latest event id"); + assert!( + new_latest_id >= new_id, + "Latest id should be >= new event id" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -55,7 +62,9 @@ async fn test_get_latest_event_id_with_events(db: Pool) { let _id2 = insert_test_event(&db, "test_channel_2", "payload2").await; let id3 = insert_test_event(&db, "test_channel_3", "payload3").await; - let latest_id = 
get_latest_event_id(&db).await.expect("Should get latest event id"); + let latest_id = get_latest_event_id(&db) + .await + .expect("Should get latest event id"); assert!(latest_id >= id3, "Latest id should be >= last inserted id"); } @@ -65,8 +74,13 @@ async fn test_poll_notify_events_no_new_events(db: Pool) { let latest_id = get_latest_event_id(&db).await.unwrap(); // Poll from the latest id - should return empty since no new events - let events = poll_notify_events(&db, latest_id).await.expect("Should poll events"); - assert!(events.is_empty(), "Should return empty vec when polling from latest id"); + let events = poll_notify_events(&db, latest_id) + .await + .expect("Should poll events"); + assert!( + events.is_empty(), + "Should return empty vec when polling from latest id" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -76,7 +90,9 @@ async fn test_poll_notify_events_returns_new_events(db: Pool) { let _id1 = insert_test_event(&db, "test_poll_channel", "payload1").await; let _id2 = insert_test_event(&db, "test_poll_channel", "payload2").await; - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); assert!(events.len() >= 2, "Should return at least 2 new events"); // Verify the events we inserted are present @@ -87,7 +103,10 @@ async fn test_poll_notify_events_returns_new_events(db: Pool) { assert_eq!(our_events.len(), 2, "Should have exactly our 2 test events"); // Verify ordering (ascending by id) - assert!(our_events[0].id < our_events[1].id, "Events should be ordered by id ascending"); + assert!( + our_events[0].id < our_events[1].id, + "Events should be ordered by id ascending" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -97,14 +116,19 @@ async fn test_poll_notify_events_respects_last_event_id(db: Pool) { let _id3 = insert_test_event(&db, "test_respect_id", "payload3").await; // Poll 
from id1 should only return id2 and id3 - let events = poll_notify_events(&db, id1).await.expect("Should poll events"); + let events = poll_notify_events(&db, id1) + .await + .expect("Should poll events"); let our_events: Vec<_> = events .iter() .filter(|e| e.channel == "test_respect_id") .collect(); assert_eq!(our_events.len(), 2, "Should only return events after id1"); - assert!(our_events.iter().all(|e| e.id > id1), "All events should have id > id1"); + assert!( + our_events.iter().all(|e| e.id > id1), + "All events should have id > id1" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -124,21 +148,24 @@ async fn test_cleanup_old_events(db: Pool) { .expect("Failed to insert old event"); // Insert a recent event - sqlx::query( - "INSERT INTO notify_event (channel, payload) VALUES ($1, $2)", - ) - .bind(&recent_channel) - .bind("recent_payload") - .execute(&db) - .await - .expect("Failed to insert recent event"); + sqlx::query("INSERT INTO notify_event (channel, payload) VALUES ($1, $2)") + .bind(&recent_channel) + .bind("recent_payload") + .execute(&db) + .await + .expect("Failed to insert recent event"); // Count before cleanup let old_count_before = count_events_for_channel(&db, &old_channel).await; - assert_eq!(old_count_before, 1, "Should have 1 old event before cleanup"); + assert_eq!( + old_count_before, 1, + "Should have 1 old event before cleanup" + ); // Cleanup events older than 10 minutes - let deleted = cleanup_old_events(&db, 10).await.expect("Should cleanup events"); + let deleted = cleanup_old_events(&db, 10) + .await + .expect("Should cleanup events"); assert!(deleted >= 1, "Should delete at least 1 old event"); // Verify old event is gone @@ -167,13 +194,18 @@ async fn test_trigger_notify_config_change(db: Pool) { .await .expect("Failed to insert config"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll 
events"); let config_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_config_change" && e.payload == "test_config_trigger") .collect(); - assert!(!config_events.is_empty(), "Should have notify_config_change event"); + assert!( + !config_events.is_empty(), + "Should have notify_config_change event" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -190,13 +222,18 @@ async fn test_trigger_notify_global_setting_change_insert(db: Pool) { .await .expect("Failed to insert global setting"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let setting_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_global_setting_change" && e.payload == setting_name) .collect(); - assert!(!setting_events.is_empty(), "Should have notify_global_setting_change event on insert"); + assert!( + !setting_events.is_empty(), + "Should have notify_global_setting_change event on insert" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -220,13 +257,18 @@ async fn test_trigger_notify_global_setting_change_update(db: Pool) { .await .expect("Failed to update global setting"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let setting_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_global_setting_change" && e.payload == setting_name) .collect(); - assert!(!setting_events.is_empty(), "Should have notify_global_setting_change event on update"); + assert!( + !setting_events.is_empty(), + "Should have notify_global_setting_change event on update" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -250,13 +292,18 @@ async fn test_trigger_notify_global_setting_change_delete(db: Pool) { .await .expect("Failed to delete global setting"); - let 
events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let setting_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_global_setting_change" && e.payload == setting_name) .collect(); - assert!(!setting_events.is_empty(), "Should have notify_global_setting_change event on delete"); + assert!( + !setting_events.is_empty(), + "Should have notify_global_setting_change event on delete" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -272,13 +319,18 @@ async fn test_trigger_notify_workspace_envs_change(db: Pool) { .await .expect("Failed to insert workspace env"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let env_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_workspace_envs_change" && e.payload == "test-workspace") .collect(); - assert!(!env_events.is_empty(), "Should have notify_workspace_envs_change event"); + assert!( + !env_events.is_empty(), + "Should have notify_workspace_envs_change event" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -294,44 +346,57 @@ async fn test_trigger_notify_workspace_key_change(db: Pool) { .await .expect("Failed to insert workspace key"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let key_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_workspace_key_change" && e.payload == "test-workspace") .collect(); - assert!(!key_events.is_empty(), "Should have notify_workspace_key_change event"); + assert!( + !key_events.is_empty(), + "Should have notify_workspace_key_change event" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] async fn 
test_trigger_notify_token_invalidation(db: Pool) { - // First insert a session token + // First insert a session token with token_hash and token_prefix let token = format!("test_token_{}", uuid::Uuid::new_v4()); + let token_hash = windmill_common::utils::calculate_hash(&token); + let token_prefix = &token[..10]; sqlx::query( - "INSERT INTO token (token, label, email, workspace_id, owner, expiration) - VALUES ($1, 'session', 'test@test.com', 'test-workspace', 'test-user', now() + interval '1 hour')", + "INSERT INTO token (token_hash, token_prefix, label, email, workspace_id, owner, expiration) + VALUES ($1, $2, 'session', 'test@test.com', 'test-workspace', 'test-user', now() + interval '1 hour')", ) - .bind(&token) + .bind(&token_hash) + .bind(token_prefix) .execute(&db) .await .expect("Failed to insert token"); let before_id = get_latest_event_id(&db).await.unwrap(); - // Delete the token (should trigger notification) - sqlx::query("DELETE FROM token WHERE token = $1") - .bind(&token) + // Delete the token (should trigger notification with prefix) + sqlx::query("DELETE FROM token WHERE token_hash = $1") + .bind(&token_hash) .execute(&db) .await .expect("Failed to delete token"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let token_events: Vec<_> = events .iter() - .filter(|e| e.channel == "notify_token_invalidation" && e.payload == token) + .filter(|e| e.channel == "notify_token_invalidation" && e.payload == token_prefix) .collect(); - assert!(!token_events.is_empty(), "Should have notify_token_invalidation event"); + assert!( + !token_events.is_empty(), + "Should have notify_token_invalidation event" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -344,13 +409,18 @@ async fn test_trigger_notify_webhook_change(db: Pool) { .await .expect("Failed to update webhook"); - let events = poll_notify_events(&db, 
before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let webhook_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_webhook_change" && e.payload == "test-workspace") .collect(); - assert!(!webhook_events.is_empty(), "Should have notify_webhook_change event"); + assert!( + !webhook_events.is_empty(), + "Should have notify_webhook_change event" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -363,13 +433,18 @@ async fn test_trigger_notify_workspace_premium_change(db: Pool) { .await .expect("Failed to update workspace premium"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let premium_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_workspace_premium_change" && e.payload == "test-workspace") .collect(); - assert!(!premium_events.is_empty(), "Should have notify_workspace_premium_change event"); + assert!( + !premium_events.is_empty(), + "Should have notify_workspace_premium_change event" + ); } // ============================================================================ @@ -392,14 +467,19 @@ async fn test_trigger_notify_http_trigger_change(db: Pool) { .await .expect("Failed to insert HTTP trigger"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let http_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_http_trigger_change") .filter(|e| e.payload.contains("test-workspace") && e.payload.contains(&trigger_path)) .collect(); - assert!(!http_events.is_empty(), "Should have notify_http_trigger_change event"); + assert!( + !http_events.is_empty(), + "Should have notify_http_trigger_change event" + ); } // 
============================================================================ @@ -431,19 +511,27 @@ async fn test_trigger_notify_runnable_version_change_script(db: Pool) .await .expect("Failed to update script lock"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let script_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_runnable_version_change") .filter(|e| e.payload.contains("test-workspace") && e.payload.contains("script")) .collect(); - assert!(!script_events.is_empty(), "Should have notify_runnable_version_change event for script"); + assert!( + !script_events.is_empty(), + "Should have notify_runnable_version_change event for script" + ); // Verify payload format: workspace_id:source_type:path:kind let parts: Vec<&str> = script_events[0].payload.split(':').collect(); assert!(parts.len() >= 4, "Payload should have at least 4 parts"); - assert_eq!(parts[0], "test-workspace", "First part should be workspace_id"); + assert_eq!( + parts[0], "test-workspace", + "First part should be workspace_id" + ); assert_eq!(parts[1], "script", "Second part should be 'script'"); } @@ -472,19 +560,27 @@ async fn test_trigger_notify_runnable_version_change_flow(db: Pool) { .await .expect("Failed to update flow versions"); - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let flow_events: Vec<_> = events .iter() .filter(|e| e.channel == "notify_runnable_version_change") .filter(|e| e.payload.contains("test-workspace") && e.payload.contains("flow")) .collect(); - assert!(!flow_events.is_empty(), "Should have notify_runnable_version_change event for flow"); + assert!( + !flow_events.is_empty(), + "Should have notify_runnable_version_change event for flow" + ); // Verify payload format let parts: Vec<&str> = 
flow_events[0].payload.split(':').collect(); assert!(parts.len() >= 4, "Payload should have at least 4 parts"); - assert_eq!(parts[0], "test-workspace", "First part should be workspace_id"); + assert_eq!( + parts[0], "test-workspace", + "First part should be workspace_id" + ); assert_eq!(parts[1], "flow", "Second part should be 'flow'"); } @@ -521,13 +617,16 @@ async fn test_concurrent_event_insertion(db: Pool) { handle.await.expect("Task should complete"); } - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); - let concurrent_events: Vec<_> = events - .iter() - .filter(|e| e.channel == channel) - .collect(); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); + let concurrent_events: Vec<_> = events.iter().filter(|e| e.channel == channel).collect(); - assert_eq!(concurrent_events.len(), 10, "Should have all 10 concurrent events"); + assert_eq!( + concurrent_events.len(), + 10, + "Should have all 10 concurrent events" + ); // Verify all events have unique IDs let ids: std::collections::HashSet = concurrent_events.iter().map(|e| e.id).collect(); @@ -571,18 +670,45 @@ async fn test_polling_isolation(db: Pool) { .expect("Failed to insert event"); // Two different "consumers" polling from different points - let events_from_baseline = poll_notify_events(&db, baseline_id).await.expect("Should poll events"); - let events_from_id1 = poll_notify_events(&db, id1).await.expect("Should poll events"); - let events_from_id2 = poll_notify_events(&db, id2).await.expect("Should poll events"); + let events_from_baseline = poll_notify_events(&db, baseline_id) + .await + .expect("Should poll events"); + let events_from_id1 = poll_notify_events(&db, id1) + .await + .expect("Should poll events"); + let events_from_id2 = poll_notify_events(&db, id2) + .await + .expect("Should poll events"); // Filter to our test events - let from_baseline: Vec<_> = events_from_baseline.iter().filter(|e| e.channel == 
channel).collect(); - let from_id1: Vec<_> = events_from_id1.iter().filter(|e| e.channel == channel).collect(); - let from_id2: Vec<_> = events_from_id2.iter().filter(|e| e.channel == channel).collect(); + let from_baseline: Vec<_> = events_from_baseline + .iter() + .filter(|e| e.channel == channel) + .collect(); + let from_id1: Vec<_> = events_from_id1 + .iter() + .filter(|e| e.channel == channel) + .collect(); + let from_id2: Vec<_> = events_from_id2 + .iter() + .filter(|e| e.channel == channel) + .collect(); - assert_eq!(from_baseline.len(), 3, "Polling from baseline should include all 3 events"); - assert_eq!(from_id1.len(), 2, "Polling from id1 should include id2 and id3"); - assert_eq!(from_id2.len(), 1, "Polling from id2 should include only id3"); + assert_eq!( + from_baseline.len(), + 3, + "Polling from baseline should include all 3 events" + ); + assert_eq!( + from_id1.len(), + 2, + "Polling from id1 should include id2 and id3" + ); + assert_eq!( + from_id2.len(), + 1, + "Polling from id2 should include only id3" + ); } // ============================================================================ @@ -595,14 +721,23 @@ async fn test_empty_payload(db: Pool) { insert_test_event(&db, "test_empty_payload", "").await; - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let empty_events: Vec<_> = events .iter() .filter(|e| e.channel == "test_empty_payload") .collect(); - assert_eq!(empty_events.len(), 1, "Should have event with empty payload"); - assert_eq!(empty_events[0].payload, "", "Payload should be empty string"); + assert_eq!( + empty_events.len(), + 1, + "Should have event with empty payload" + ); + assert_eq!( + empty_events[0].payload, "", + "Payload should be empty string" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -613,14 +748,24 @@ async fn test_large_payload(db: Pool) { let large_payload = 
"x".repeat(1024); insert_test_event(&db, "test_large_payload", &large_payload).await; - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let large_events: Vec<_> = events .iter() .filter(|e| e.channel == "test_large_payload") .collect(); - assert_eq!(large_events.len(), 1, "Should have event with large payload"); - assert_eq!(large_events[0].payload.len(), 1024, "Payload should be preserved"); + assert_eq!( + large_events.len(), + 1, + "Should have event with large payload" + ); + assert_eq!( + large_events[0].payload.len(), + 1024, + "Payload should be preserved" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -630,14 +775,23 @@ async fn test_special_characters_in_payload(db: Pool) { let special_payload = r#"{"key": "value with \"quotes\" and 'apostrophes'", "unicode": "日本語", "newline": "line1\nline2"}"#; insert_test_event(&db, "test_special_chars", special_payload).await; - let events = poll_notify_events(&db, before_id).await.expect("Should poll events"); + let events = poll_notify_events(&db, before_id) + .await + .expect("Should poll events"); let special_events: Vec<_> = events .iter() .filter(|e| e.channel == "test_special_chars") .collect(); - assert_eq!(special_events.len(), 1, "Should have event with special characters"); - assert_eq!(special_events[0].payload, special_payload, "Special characters should be preserved"); + assert_eq!( + special_events.len(), + 1, + "Should have event with special characters" + ); + assert_eq!( + special_events[0].payload, special_payload, + "Special characters should be preserved" + ); } #[sqlx::test(migrations = "../migrations", fixtures("base"))] @@ -664,7 +818,9 @@ async fn test_cleanup_with_no_old_events(db: Pool) { assert_eq!(before_count, 2, "Should have 2 recent events"); // Cleanup old events (none of our events should be deleted since they're recent) - let _deleted 
= cleanup_old_events(&db, 10).await.expect("Should cleanup events"); + let _deleted = cleanup_old_events(&db, 10) + .await + .expect("Should cleanup events"); let after_count = count_events_for_channel(&db, &channel).await; assert_eq!(after_count, 2, "Recent events should not be deleted"); @@ -732,7 +888,11 @@ impl ServerProcess { } fn logs_contain(&self, needle: &str) -> bool { - self.log_lines.lock().unwrap().iter().any(|l| l.contains(needle)) + self.log_lines + .lock() + .unwrap() + .iter() + .any(|l| l.contains(needle)) } fn dump_logs(&self) -> String { @@ -783,12 +943,17 @@ async fn test_two_server_processes_both_receive_event() { let mut server_b = ServerProcess::start(19200, &db_url); // Wait for both servers to be ready - let (ready_a, ready_b) = tokio::join!( - wait_for_server(19100, 30), - wait_for_server(19200, 30), + let (ready_a, ready_b) = tokio::join!(wait_for_server(19100, 30), wait_for_server(19200, 30),); + assert!( + ready_a, + "Server A (port 19100) failed to start. Logs:\n{}", + server_a.dump_logs() + ); + assert!( + ready_b, + "Server B (port 19200) failed to start. Logs:\n{}", + server_b.dump_logs() ); - assert!(ready_a, "Server A (port 19100) failed to start. Logs:\n{}", server_a.dump_logs()); - assert!(ready_b, "Server B (port 19200) failed to start. 
Logs:\n{}", server_b.dump_logs()); // Give servers a moment to complete their first poll cycle tokio::time::sleep(std::time::Duration::from_secs(2)).await; diff --git a/backend/windmill-native-triggers/src/google/external.rs b/backend/windmill-native-triggers/src/google/external.rs index 756090fa02277..f68706fb2f331 100644 --- a/backend/windmill-native-triggers/src/google/external.rs +++ b/backend/windmill-native-triggers/src/google/external.rs @@ -11,7 +11,7 @@ use windmill_common::{ use windmill_queue::PushArgsOwned; use crate::{ - generate_webhook_service_url, get_token_by_prefix, + generate_webhook_service_url, rotate_webhook_token, sync::{SyncAction, SyncError, TriggerSyncInfo}, update_native_trigger_error, update_native_trigger_service_config, External, NativeTrigger, NativeTriggerData, ServiceName, @@ -309,14 +309,16 @@ impl Google { } /// Renew an expiring Google watch channel. - /// Stops the old channel and creates a new one with the same channel ID. - /// Returns the updated service_config with new expiration. + /// Rotates the webhook token (creating a new one with the same label), + /// stops the old channel and creates a new one with the same channel ID. + /// Returns (new_service_config, new_plaintext_token, old_token_hash). + /// Callers should delete old_token_hash after successfully updating the trigger. pub async fn renew_channel( &self, w_id: &str, trigger: &NativeTrigger, db: &DB, - ) -> Result { + ) -> Result<(serde_json::Value, String, String)> { let config: GoogleServiceConfig = trigger .service_config .as_ref() @@ -324,10 +326,15 @@ impl Google { .transpose()? .ok_or_else(|| Error::InternalErr("Missing service config".to_string()))?; - let webhook_token = get_token_by_prefix(db, &trigger.webhook_token_prefix) - .await? - .ok_or_else(|| Error::InternalErr("Webhook token not found".to_string()))?; - + let rotated = match rotate_webhook_token(db, &trigger.webhook_token_hash).await? 
{ + Some(r) => r, + None => { + return Err(Error::InternalErr(format!( + "Cannot renew channel {}: webhook token no longer exists and no user context to create a fresh one", + trigger.external_id + ))); + } + }; let base_url = &*BASE_URL.read().await; // Reuse the same channel ID so external_id stays permanent let channel_id = trigger.external_id.clone(); @@ -338,7 +345,7 @@ impl Google { trigger.is_flow, Some(&channel_id), ServiceName::Google, - &webhook_token, + &rotated.new_token, ); tracing::info!( @@ -403,8 +410,9 @@ impl Google { new_config.google_resource_id = Some(resp.resource_id); new_config.expiration = Some(resp.expiration); - serde_json::to_value(&new_config) - .map_err(|e| Error::internal_err(format!("Failed to serialize config: {}", e))) + let config_value = serde_json::to_value(&new_config) + .map_err(|e| Error::internal_err(format!("Failed to serialize config: {}", e)))?; + Ok((config_value, rotated.new_token, rotated.old_token_hash)) } } @@ -461,17 +469,25 @@ async fn renew_expiring_channels( ); match handler.renew_channel(workspace_id, trigger, db).await { - Ok(new_config) => { + Ok((new_config, new_token, old_token_hash)) => { match update_native_trigger_service_config( db, workspace_id, ServiceName::Google, &trigger.external_id, &new_config, + Some(&new_token), ) .await { Ok(()) => { + // Trigger updated — clean up old token (best-effort) + if let Err(e) = crate::delete_token_by_hash(db, &old_token_hash).await { + tracing::warn!( + "Failed to delete old webhook token after channel renewal for {}: {}", + trigger.external_id, e + ); + } tracing::info!( "Renewed Google channel {} for '{}'", trigger.external_id, diff --git a/backend/windmill-native-triggers/src/handler.rs b/backend/windmill-native-triggers/src/handler.rs index 9384bde8b6fb0..b66a8ae3e0d5b 100644 --- a/backend/windmill-native-triggers/src/handler.rs +++ b/backend/windmill-native-triggers/src/handler.rs @@ -1,6 +1,6 @@ use crate::{ - decrypt_oauth_data, delete_native_trigger, 
delete_token_by_prefix, get_native_trigger, - get_token_by_prefix, list_native_triggers, store_native_trigger, update_native_trigger_error, + decrypt_oauth_data, delete_native_trigger, delete_token_by_hash, get_native_trigger, + list_native_triggers, rotate_webhook_token, store_native_trigger, update_native_trigger_error, External, NativeTrigger, NativeTriggerConfig, NativeTriggerData, ServiceName, }; use axum::{ @@ -234,31 +234,48 @@ async fn update_native_trigger_handler( let runnable_changed = existing.script_path != data.script_path || existing.is_flow != data.is_flow; - let webhook_token = match get_token_by_prefix(&db, &existing.webhook_token_prefix).await? { - Some(token) if !runnable_changed => token, - existing_token => { - if let Some(_) = existing_token { - delete_token_by_prefix(&db, &existing.webhook_token_prefix).await?; - } else { - tracing::warn!( - "Webhook token not found for trigger {} (prefix: {}), recreating token", - external_id, - existing.webhook_token_prefix - ); + // Track old token hash so we can clean it up after everything succeeds + let mut old_token_hash_to_delete: Option = None; + + let webhook_token = if runnable_changed { + // Scopes change when the runnable changes — delete old, create fresh token + old_token_hash_to_delete = Some(existing.webhook_token_hash.clone()); + let token = new_webhook_token( + &mut *tx, + &db, + &authed, + &data.script_path, + data.is_flow, + &workspace_id, + service_name, + ) + .await?; + tx.commit().await?; + tx = user_db.begin(&authed).await?; + token + } else { + // Same runnable — rotate the token keeping the same label + match rotate_webhook_token(&db, &existing.webhook_token_hash).await? 
{ + Some(rotated) => { + old_token_hash_to_delete = Some(rotated.old_token_hash); + rotated.new_token + } + None => { + // Old token gone — create a fresh one + let token = new_webhook_token( + &mut *tx, + &db, + &authed, + &data.script_path, + data.is_flow, + &workspace_id, + service_name, + ) + .await?; + tx.commit().await?; + tx = user_db.begin(&authed).await?; + token } - let token = new_webhook_token( - &mut *tx, - &db, - &authed, - &data.script_path, - data.is_flow, - &workspace_id, - service_name, - ) - .await?; - tx.commit().await?; - tx = user_db.begin(&authed).await?; - token } }; @@ -303,6 +320,16 @@ async fn update_native_trigger_handler( tx.commit().await?; + // Everything succeeded — clean up old token (best-effort) + if let Some(old_hash) = old_token_hash_to_delete { + if let Err(e) = delete_token_by_hash(&db, &old_hash).await { + tracing::warn!( + "Failed to delete old webhook token after trigger update: {}", + e + ); + } + } + Ok(format!("Native trigger updated")) } @@ -428,12 +455,12 @@ async fn delete_native_trigger_handler( return Err(Error::NotFound(format!("Native trigger not found"))); } - // Delete the webhook token using its prefix - if !delete_token_by_prefix(&db, &existing.webhook_token_prefix).await? { + // Delete the webhook token using its hash + if !delete_token_by_hash(&db, &existing.webhook_token_hash).await? 
{ tracing::warn!( - "Webhook token not found when deleting trigger {} (prefix: {})", + "Webhook token not found when deleting trigger {} (hash: {})", external_id, - existing.webhook_token_prefix + existing.webhook_token_hash ); } diff --git a/backend/windmill-native-triggers/src/lib.rs b/backend/windmill-native-triggers/src/lib.rs index 416cf62d86ac3..25234ff3f0c8f 100644 --- a/backend/windmill-native-triggers/src/lib.rs +++ b/backend/windmill-native-triggers/src/lib.rs @@ -190,7 +190,7 @@ pub struct NativeTrigger { pub service_name: ServiceName, pub script_path: String, pub is_flow: bool, - pub webhook_token_prefix: String, + pub webhook_token_hash: String, pub service_config: Option, pub error: Option, pub created_at: DateTime, @@ -731,41 +731,85 @@ async fn update_oauth_token_resource( } } -/// Look up the full token from the token table using its prefix -pub async fn get_token_by_prefix<'c, E: sqlx::Executor<'c, Database = Postgres>>( - db: E, - token_prefix: &str, -) -> Result> { - let token = sqlx::query_scalar!( - r#" - SELECT token as "token!" - FROM token - WHERE token LIKE concat($1::text, '%') - LIMIT 1 - "#, - token_prefix +/// Create a new webhook token that keeps the same label as the old one. +/// The old token is **not** deleted — callers must call `delete_token_by_hash` +/// on `old_token_hash` after the trigger row has been successfully updated. +/// This ensures the trigger keeps working if the external service call or +/// subsequent DB update fails. +/// +/// Returns `Ok(None)` if the old token no longer exists (e.g. manually deleted by user). +/// In that case, `renew_channel` returns an error which `renew_expiring_channels` writes +/// to the trigger's `error` column — visible in the UI so the user can re-create the trigger. 
+pub async fn rotate_webhook_token(db: &DB, old_token_hash: &str) -> Result> { + use windmill_common::auth::{hash_token, TOKEN_PREFIX_LEN}; + use windmill_common::min_version::MIN_VERSION_SUPPORTS_TOKEN_HASH; + use windmill_common::utils::rd_string; + + let old = match sqlx::query!( + "SELECT label, email, scopes, workspace_id, super_admin, owner, expiration FROM token WHERE token_hash = $1", + old_token_hash ) .fetch_optional(db) + .await? + { + Some(row) => row, + None => { + tracing::warn!( + "Webhook token not found for hash {}, caller should create a fresh token", + old_token_hash + ); + return Ok(None); + } + }; + + let new_token = rd_string(32); + let new_hash = hash_token(&new_token); + let new_prefix = new_token.get(..TOKEN_PREFIX_LEN).unwrap_or(&new_token); + let plaintext: Option<&str> = if MIN_VERSION_SUPPORTS_TOKEN_HASH.met().await { + None + } else { + Some(&new_token) + }; + + sqlx::query!( + "INSERT INTO token (token_hash, token_prefix, token, email, label, super_admin, scopes, workspace_id, owner, expiration) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", + new_hash, + new_prefix, + plaintext as Option<&str>, + old.email, + old.label, + old.super_admin, + old.scopes.as_deref(), + old.workspace_id, + old.owner, + old.expiration, + ) + .execute(db) .await?; - Ok(token) + Ok(Some(RotatedToken { + new_token, + old_token_hash: old_token_hash.to_string(), + })) +} + +pub struct RotatedToken { + pub new_token: String, + /// Hash of the old token — callers should delete this after the + /// trigger row has been successfully updated to point at the new token. + pub old_token_hash: String, } -/// Delete a token from the token table using its prefix -pub async fn delete_token_by_prefix<'c, E: sqlx::Executor<'c, Database = Postgres>>( +/// Delete a token from the token table using its hash (exact match). 
+pub async fn delete_token_by_hash<'c, E: sqlx::Executor<'c, Database = Postgres>>( db: E, - token_prefix: &str, + token_hash: &str, ) -> Result { - let deleted = sqlx::query!( - r#" - DELETE FROM token - WHERE token LIKE concat($1::text, '%') - "#, - token_prefix - ) - .execute(db) - .await? - .rows_affected(); + let deleted = sqlx::query!("DELETE FROM token WHERE token_hash = $1", token_hash) + .execute(db) + .await? + .rows_affected(); Ok(deleted > 0) } @@ -778,8 +822,9 @@ pub async fn store_native_trigger<'c, E: sqlx::Executor<'c, Database = Postgres> config: &NativeTriggerConfig, service_config: C, ) -> Result<()> { - // Store only the first 10 characters of the webhook token as a prefix - let webhook_token_prefix: String = config.webhook_token.chars().take(10).collect(); + use windmill_common::auth::hash_token; + + let webhook_token_hash = hash_token(&config.webhook_token); sqlx::query!( r#" @@ -789,20 +834,20 @@ pub async fn store_native_trigger<'c, E: sqlx::Executor<'c, Database = Postgres> service_name, script_path, is_flow, - webhook_token_prefix, + webhook_token_hash, service_config ) VALUES ( $1, $2, $3, $4, $5, $6, $7 ) ON CONFLICT (external_id, workspace_id, service_name) - DO UPDATE SET script_path = $4, is_flow = $5, webhook_token_prefix = $6, service_config = $7, error = NULL, updated_at = NOW() + DO UPDATE SET script_path = $4, is_flow = $5, webhook_token_hash = $6, service_config = $7, error = NULL, updated_at = NOW() "#, external_id, workspace_id, service_name as ServiceName, config.script_path, config.is_flow, - webhook_token_prefix, + webhook_token_hash, sqlx::types::Json(service_config) as _, ) .execute(db) @@ -819,13 +864,14 @@ pub async fn update_native_trigger<'c, E: sqlx::Executor<'c, Database = Postgres config: &NativeTriggerConfig, service_config: Option<&RawValue>, ) -> Result<()> { - // Store only the first 10 characters of the webhook token as a prefix - let webhook_token_prefix: String = 
config.webhook_token.chars().take(10).collect(); + use windmill_common::auth::hash_token; + + let webhook_token_hash = hash_token(&config.webhook_token); sqlx::query!( r#" UPDATE native_trigger - SET script_path = $1, is_flow = $2, webhook_token_prefix = $3, service_config = $4, error = NULL, updated_at = NOW() + SET script_path = $1, is_flow = $2, webhook_token_hash = $3, service_config = $4, error = NULL, updated_at = NOW() WHERE workspace_id = $5 AND service_name = $6 @@ -833,7 +879,7 @@ pub async fn update_native_trigger<'c, E: sqlx::Executor<'c, Database = Postgres "#, config.script_path, config.is_flow, - webhook_token_prefix, + webhook_token_hash, service_config.map(sqlx::types::Json) as _, workspace_id, service_name as ServiceName, @@ -884,7 +930,7 @@ pub async fn get_native_trigger<'c, E: sqlx::Executor<'c, Database = Postgres>>( service_name AS "service_name!: ServiceName", script_path, is_flow, - webhook_token_prefix, + webhook_token_hash, service_config, error, created_at, @@ -922,7 +968,7 @@ pub async fn get_native_trigger_by_script<'c, E: sqlx::Executor<'c, Database = P service_name AS "service_name!: ServiceName", script_path, is_flow, - webhook_token_prefix, + webhook_token_hash, service_config, error, created_at, @@ -968,7 +1014,7 @@ pub async fn list_native_triggers<'c, E: sqlx::Executor<'c, Database = Postgres> nt.service_name AS "service_name!: ServiceName", nt.script_path, nt.is_flow, - nt.webhook_token_prefix, + nt.webhook_token_hash, nt.service_config, nt.error, nt.created_at, @@ -1045,11 +1091,16 @@ pub async fn update_native_trigger_service_config< service_name: ServiceName, external_id: &str, service_config: &serde_json::Value, + new_webhook_token: Option<&str>, ) -> Result<()> { + let new_hash = new_webhook_token.map(windmill_common::auth::hash_token); + sqlx::query!( r#" UPDATE native_trigger - SET service_config = $1, updated_at = NOW() + SET service_config = $1, + webhook_token_hash = COALESCE($5, webhook_token_hash), + updated_at = 
NOW() WHERE workspace_id = $2 AND service_name = $3 @@ -1059,6 +1110,7 @@ pub async fn update_native_trigger_service_config< workspace_id, service_name as ServiceName, external_id, + new_hash.as_deref(), ) .execute(db) .await?; diff --git a/backend/windmill-native-triggers/src/sync.rs b/backend/windmill-native-triggers/src/sync.rs index 873ba2255742e..292d00de2a763 100644 --- a/backend/windmill-native-triggers/src/sync.rs +++ b/backend/windmill-native-triggers/src/sync.rs @@ -390,6 +390,7 @@ pub async fn reconcile_with_external_state( service_name, &trigger.external_id, external_service_config, + None, ) .await { diff --git a/backend/windmill-native-triggers/src/workspace_integrations.rs b/backend/windmill-native-triggers/src/workspace_integrations.rs index 3d3db5eea5572..87d40d5b05426 100644 --- a/backend/windmill-native-triggers/src/workspace_integrations.rs +++ b/backend/windmill-native-triggers/src/workspace_integrations.rs @@ -34,8 +34,8 @@ use windmill_api_auth::ApiAuthed; #[cfg(feature = "native_trigger")] use crate::{ - decrypt_oauth_data, delete_token_by_prefix, delete_workspace_integration, - nextcloud::OcsResponse, resolve_endpoint, store_workspace_integration, ServiceName, + decrypt_oauth_data, delete_token_by_hash, delete_workspace_integration, nextcloud::OcsResponse, + resolve_endpoint, store_workspace_integration, ServiceName, }; #[cfg(feature = "native_trigger")] @@ -253,7 +253,7 @@ async fn fetch_nextcloud_user_id(base_url: &str, access_token: &str) -> anyhow:: #[cfg(feature = "native_trigger")] async fn delete_triggers_for_service(db: &DB, workspace_id: &str, service_name: ServiceName) { let triggers = sqlx::query!( - "SELECT external_id, webhook_token_prefix FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", + "SELECT external_id, webhook_token_hash FROM native_trigger WHERE workspace_id = $1 AND service_name = $2", workspace_id, service_name as ServiceName ) @@ -303,10 +303,10 @@ async fn delete_triggers_for_service(db: &DB, 
workspace_id: &str, service_name: // Delete all associated webhook tokens for trigger in &triggers { - if let Err(e) = delete_token_by_prefix(db, &trigger.webhook_token_prefix).await { + if let Err(e) = delete_token_by_hash(db, &trigger.webhook_token_hash).await { tracing::error!( - "Failed to delete webhook token with prefix {}: {e}", - trigger.webhook_token_prefix + "Failed to delete webhook token with hash {}: {e}", + trigger.webhook_token_hash ); } } diff --git a/backend/windmill-queue/tests/fixtures/base.sql b/backend/windmill-queue/tests/fixtures/base.sql index 7db9918fba0c1..412fa1029f936 100644 --- a/backend/windmill-queue/tests/fixtures/base.sql +++ b/backend/windmill-queue/tests/fixtures/base.sql @@ -33,9 +33,9 @@ INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES INSERT INTO usr(workspace_id, email, username, is_admin, role) VALUES ('test-workspace', 'test3@windmill.dev', 'test-user-3', false, 'User'); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN', 'test@windmill.dev', 'test token', true); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); -insert INTO token(token, email, label, super_admin) VALUES ('SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN', 'test@windmill.dev', 'test token', true); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN_2'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_2', 'test2@windmill.dev', 'test token 2', false); +insert INTO token(token_hash, token_prefix, token, email, label, super_admin) VALUES (encode(sha256('SECRET_TOKEN_3'::bytea), 'hex'), 'SECRET_TOK', 'SECRET_TOKEN_3', 'test3@windmill.dev', 'test token 3', false); GRANT ALL PRIVILEGES ON TABLE workspace_key TO 
windmill_admin; GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_user;