From 24b23243790d8221975fd898307d3a747017c2d7 Mon Sep 17 00:00:00 2001 From: Tanner Stirrat Date: Wed, 26 Nov 2025 08:24:15 -0700 Subject: [PATCH 1/6] chore: upgrade to nextra v4 --- .gitignore | 2 + TODO.md | 7 + app/docs/[[...mdxPath]]/page.tsx | 27 + app/layout.tsx | 108 + components/banner.tsx | 2 + components/cta.tsx | 2 + components/footer.tsx | 12 - components/icons/logo-icon.svg | 16 + components/icons/logo.svg | 67 + components/logo.tsx | 92 - components/nextra/ExternalIcon.tsx | 21 - components/nextra/Flexsearch.tsx | 408 ---- components/nextra/HighlightMatches.tsx | 71 - components/nextra/Input.tsx | 52 - components/nextra/Search.tsx | 324 --- components/nextra/types.ts | 31 - components/overview-page.tsx | 38 + components/swagger.tsx | 4 +- components/ui/button.tsx | 2 +- content/_app.tsx | 18 + content/_meta.ts | 24 + content/authzed/_meta.ts | 8 + content/authzed/api/_meta.ts | 15 + content/authzed/api/http-api.mdx | 5 + content/authzed/concepts/audit-logging.mdx | 199 ++ .../authzed/concepts/authzed-materialize.mdx | 612 +++++ .../authzed/concepts/expedited-support.mdx | 83 + content/authzed/concepts/feature-maturity.mdx | 31 + .../authzed/concepts/management-dashboard.mdx | 9 + content/authzed/concepts/multi-region.mdx | 5 + .../authzed/concepts/private-networking.mdx | 21 + .../concepts/restricted-api-access.mdx | 315 +++ content/authzed/concepts/security-embargo.mdx | 70 + content/authzed/concepts/update-channels.mdx | 34 + .../authzed/concepts/workload-isolation.mdx | 31 + content/authzed/guides/picking-a-product.mdx | 126 ++ .../guides/setting-up-private-networking.mdx | 152 ++ content/authzed/links/_meta.ts | 20 + content/best-practices/_meta.ts | 25 + content/best-practices/index.mdx | 353 +++ content/globals.css | 95 + content/index.mdx | 13 + content/mcp/_meta.ts | 18 + content/mcp/authzed/_meta.ts | 10 + content/mcp/authzed/authzed-mcp-server.mdx | 216 ++ .../mcp/authzed/spicedb-dev-mcp-server.mdx | 326 +++ content/mcp/index.mdx | 120 + content/spicedb/_meta.ts | 10 + content/spicedb/api/_meta.ts | 15 + content/spicedb/api/http-api.mdx | 5 + content/spicedb/concepts/_meta.ts | 15 + content/spicedb/concepts/caveats.mdx | 326 +++ content/spicedb/concepts/commands.mdx | 589 +++++ content/spicedb/concepts/consistency.mdx | 177 ++ .../spicedb/concepts/datastore-migrations.mdx | 127 ++ content/spicedb/concepts/datastores.mdx | 435 ++++ .../concepts/expiring-relationships.mdx | 175 ++ content/spicedb/concepts/reflection-apis.mdx | 134 ++ content/spicedb/concepts/relationships.mdx | 214 ++ content/spicedb/concepts/schema.mdx | 524 +++++ content/spicedb/concepts/watch.mdx | 181 ++ content/spicedb/concepts/zanzibar.mdx | 272 +++ content/spicedb/getting-started/_meta.ts | 12 + .../getting-started/client-libraries.mdx | 48 + .../getting-started/coming-from/_meta.ts | 6 + .../getting-started/coming-from/cancancan.mdx | 65 + .../getting-started/coming-from/opa.mdx | 70 + .../getting-started/discovering-spicedb.mdx | 98 + content/spicedb/getting-started/faq.mdx | 58 + .../spicedb/getting-started/first-steps.mdx | 98 + .../spicedb/getting-started/install/_meta.ts | 10 + .../getting-started/install/debian.mdx | 73 + .../getting-started/install/docker.mdx | 49 + .../getting-started/install/kubernetes.mdx | 90 + .../spicedb/getting-started/install/macos.mdx | 39 + .../spicedb/getting-started/install/rhel.mdx | 47 + .../getting-started/install/windows.mdx | 21 + .../getting-started/installing-zed.mdx | 1513 +++++++++++++ content/spicedb/getting-started/page.mdx | 0 .../getting-started/protecting-a-blog.mdx | 1115 ++++++++++ content/spicedb/links/_meta.ts | 24 + content/spicedb/modeling/_meta.ts | 18 + .../spicedb/modeling/access-control-audit.mdx | 107 + .../modeling/access-control-management.mdx | 108 + content/spicedb/modeling/attributes.mdx | 107 + .../spicedb/modeling/composable-schemas.mdx | 206 ++ .../spicedb/modeling/developing-a-schema.mdx | 419 ++++ content/spicedb/modeling/migrating-schema.mdx | 210 ++ .../modeling/protecting-a-list-endpoint.mdx | 125 ++ .../modeling/recursion-and-max-depth.mdx | 149 ++ .../spicedb/modeling/representing-users.mdx | 138 ++ .../modeling/validation-testing-debugging.mdx | 288 +++ content/spicedb/ops/_meta.ts | 13 + .../spicedb/ops/ai-agent-authorization.mdx | 60 + content/spicedb/ops/data/_meta.ts | 7 + content/spicedb/ops/data/bulk-operations.mdx | 126 ++ content/spicedb/ops/data/migrations.mdx | 53 + .../ops/data/writing-relationships.mdx | 112 + .../ops/deploying-spicedb-operator.mdx | 135 ++ content/spicedb/ops/eks.mdx | 336 +++ content/spicedb/ops/load-testing.mdx | 376 ++++ content/spicedb/ops/observability.mdx | 159 ++ content/spicedb/ops/operator.mdx | 251 +++ content/spicedb/ops/performance.mdx | 93 + content/spicedb/ops/secure-rag-pipelines.mdx | 74 + globals.css | 35 - mdx-components.ts | 9 + next-env.d.ts | 4 +- next.config.mjs | 7 +- package.json | 26 +- pagefind.log | 28 + pnpm-lock.yaml | 1962 ++++++++--------- pnpm-workspace.yaml | 15 + postcss.config.js | 7 - postcss.config.mjs | 5 + public/feed.json | 424 +++- scripts/buildSearchIndex.mts | 69 + scripts/postbuild.sh | 4 +- tailwind.config.ts | 51 - theme.config.tsx | 77 - tsconfig.json | 33 +- 121 files changed, 15033 insertions(+), 2268 deletions(-) create mode 100644 TODO.md create mode 100644 app/docs/[[...mdxPath]]/page.tsx create mode 100644 app/layout.tsx create mode 100644 components/icons/logo-icon.svg create mode 100644 components/icons/logo.svg delete mode 100644 components/logo.tsx delete mode 100644 components/nextra/ExternalIcon.tsx delete mode 100644 components/nextra/Flexsearch.tsx delete mode 100644 components/nextra/HighlightMatches.tsx delete mode 100644 components/nextra/Input.tsx delete mode 100644 components/nextra/Search.tsx delete mode 100644 components/nextra/types.ts create mode 100644 components/overview-page.tsx create mode 100644 content/_app.tsx create mode 100644 content/_meta.ts create mode 100644 content/authzed/_meta.ts create mode 100644 content/authzed/api/_meta.ts create mode 100644 content/authzed/api/http-api.mdx create mode 100644 content/authzed/concepts/audit-logging.mdx create mode 100644 content/authzed/concepts/authzed-materialize.mdx create mode 100644 content/authzed/concepts/expedited-support.mdx create mode 100644 content/authzed/concepts/feature-maturity.mdx create mode 100644 content/authzed/concepts/management-dashboard.mdx create mode 100644 content/authzed/concepts/multi-region.mdx create mode 100644 content/authzed/concepts/private-networking.mdx create mode 100644 content/authzed/concepts/restricted-api-access.mdx create mode 100644 content/authzed/concepts/security-embargo.mdx create mode 100644 content/authzed/concepts/update-channels.mdx create mode 100644 content/authzed/concepts/workload-isolation.mdx create mode 100644 content/authzed/guides/picking-a-product.mdx create mode 100644 content/authzed/guides/setting-up-private-networking.mdx create mode 100644 content/authzed/links/_meta.ts create mode 100644 content/best-practices/_meta.ts create mode 100644 content/best-practices/index.mdx create mode 100644 content/globals.css create mode 100644 content/index.mdx create mode 100644 content/mcp/_meta.ts create mode 100644 content/mcp/authzed/_meta.ts create mode 100644 content/mcp/authzed/authzed-mcp-server.mdx create mode 100644 content/mcp/authzed/spicedb-dev-mcp-server.mdx create mode 100644 content/mcp/index.mdx create mode 100644 content/spicedb/_meta.ts create mode 100644 content/spicedb/api/_meta.ts create mode 100644 content/spicedb/api/http-api.mdx create mode 100644 content/spicedb/concepts/_meta.ts create mode 100644 content/spicedb/concepts/caveats.mdx create mode 100644 content/spicedb/concepts/commands.mdx create mode 100644 content/spicedb/concepts/consistency.mdx create mode 100644 content/spicedb/concepts/datastore-migrations.mdx create mode 100644 content/spicedb/concepts/datastores.mdx create mode 100644 content/spicedb/concepts/expiring-relationships.mdx create mode 100644 content/spicedb/concepts/reflection-apis.mdx create mode 100644 content/spicedb/concepts/relationships.mdx create mode 100644 content/spicedb/concepts/schema.mdx create mode 100644 content/spicedb/concepts/watch.mdx create mode 100644 content/spicedb/concepts/zanzibar.mdx create mode 100644 content/spicedb/getting-started/_meta.ts create mode 100644 content/spicedb/getting-started/client-libraries.mdx create mode 100644 content/spicedb/getting-started/coming-from/_meta.ts create mode 100644 content/spicedb/getting-started/coming-from/cancancan.mdx create mode 100644 content/spicedb/getting-started/coming-from/opa.mdx create mode 100644 content/spicedb/getting-started/discovering-spicedb.mdx create mode 100644 content/spicedb/getting-started/faq.mdx create mode 100644 content/spicedb/getting-started/first-steps.mdx create mode 100644 content/spicedb/getting-started/install/_meta.ts create mode 100644 content/spicedb/getting-started/install/debian.mdx create mode 100644 content/spicedb/getting-started/install/docker.mdx create mode 100644 content/spicedb/getting-started/install/kubernetes.mdx create mode 100644 content/spicedb/getting-started/install/macos.mdx create mode 100644 content/spicedb/getting-started/install/rhel.mdx create mode 100644 content/spicedb/getting-started/install/windows.mdx create mode 100644 content/spicedb/getting-started/installing-zed.mdx create mode 100644 content/spicedb/getting-started/page.mdx create mode 100644 content/spicedb/getting-started/protecting-a-blog.mdx create mode 100644 content/spicedb/links/_meta.ts create mode 100644 content/spicedb/modeling/_meta.ts create mode 100644 content/spicedb/modeling/access-control-audit.mdx create mode 100644 content/spicedb/modeling/access-control-management.mdx create mode 100644 content/spicedb/modeling/attributes.mdx create mode 100644 content/spicedb/modeling/composable-schemas.mdx create mode 100644 content/spicedb/modeling/developing-a-schema.mdx create mode 100644 content/spicedb/modeling/migrating-schema.mdx create mode 100644 content/spicedb/modeling/protecting-a-list-endpoint.mdx create mode 100644 content/spicedb/modeling/recursion-and-max-depth.mdx create mode 100644 content/spicedb/modeling/representing-users.mdx create mode 100644 content/spicedb/modeling/validation-testing-debugging.mdx create mode 100644 content/spicedb/ops/_meta.ts create mode 100644 content/spicedb/ops/ai-agent-authorization.mdx create mode 100644 content/spicedb/ops/data/_meta.ts create mode 100644 content/spicedb/ops/data/bulk-operations.mdx create mode 100644 content/spicedb/ops/data/migrations.mdx create mode 100644 content/spicedb/ops/data/writing-relationships.mdx create mode 100644 content/spicedb/ops/deploying-spicedb-operator.mdx create mode 100644 content/spicedb/ops/eks.mdx create mode 100644 content/spicedb/ops/load-testing.mdx create mode 100644 content/spicedb/ops/observability.mdx create mode 100644 content/spicedb/ops/operator.mdx create mode 100644 content/spicedb/ops/performance.mdx create mode 100644 content/spicedb/ops/secure-rag-pipelines.mdx delete mode 100644 globals.css create mode 100644 mdx-components.ts create mode 100644 pagefind.log create mode 100644 pnpm-workspace.yaml delete mode 100644 postcss.config.js create mode 100644 postcss.config.mjs create mode 100644 scripts/buildSearchIndex.mts delete mode 100644 tailwind.config.ts delete mode 100644 theme.config.tsx diff --git a/.gitignore b/.gitignore index b4f171d..3234b97 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,8 @@ node_modules/* .next .env* tsconfig.tsbuildinfo +_pagefind/ +out/ # Generated public/robots.txt diff --git a/TODO.md b/TODO.md new file mode 100644 index 0000000..74417ff --- /dev/null +++ b/TODO.md @@ -0,0 +1,7 @@ +- [ ] check styling of footer against live +- [ ] check styling of header against live +- [ ] check styling of search bar against live +- [ ] check behavior of search bar, especially around docs results ranking higher than non-docs +- [x] make sure that search bar works (check pagefind) +- [x] finish pagefind thought +- [ ] make styling of swagger page look nice diff --git a/app/docs/[[...mdxPath]]/page.tsx b/app/docs/[[...mdxPath]]/page.tsx new file mode 100644 index 0000000..8c56e51 --- /dev/null +++ b/app/docs/[[...mdxPath]]/page.tsx @@ -0,0 +1,27 @@ +import { generateStaticParamsFor, importPage } from 'nextra/pages' +import { useMDXComponents as getMDXComponents } from '@/mdx-components' + +export const generateStaticParams = generateStaticParamsFor('mdxPath') + +export async function generateMetadata(props) { + const params = await props.params + const { metadata } = await importPage(params.mdxPath) + return metadata +} + +const Wrapper = getMDXComponents().wrapper + +export default async function Page(props) { + const params = await props.params + const { + default: MDXContent, + toc, + metadata, + sourceCode + } = await importPage(params.mdxPath) + return ( + + + + ) +} diff --git a/app/layout.tsx b/app/layout.tsx new file mode 100644 index 0000000..053b81a --- /dev/null +++ b/app/layout.tsx @@ -0,0 +1,108 @@ +import { Layout, Navbar, Footer } from 'nextra-theme-docs' +import Link from "next/link"; +import { Head, Search } from 'nextra/components' +import { getPageMap } from 'nextra/page-map' +import 'nextra-theme-docs/style.css' +import Logo from '@/components/icons/logo.svg' +import LogoIcon from '@/components/icons/logo-icon.svg' +import Banner from '@/components/banner' +import { NavCTA, TocCTA } from "@/components/cta"; +import type { Metadata } from 'next' + +// TODO: make sure this is all right +export const metadata: Metadata = { + metadataBase: new URL('https://authzed.com'), + title: { + default: "Authzed Docs", + template: '%s - Authzed Docs' + }, + description: "Welcome to the SpiceDB and AuthZed docs site.", +} + +export default async function RootLayout({ children }) { + const pageMap = await getPageMap() + const enableSearch = process.env.NEXT_PUBLIC_ENABLE_SEARCH_BLOG_INTEGRATION === "true"; + + const navbar = ( + } + logoLink="https://authzed.com" + chatLink="https://authzed.com/discord" + projectLink="https://github.com/authzed/spicedb" + > + + + ) + // TODO + /* + const { title: titleContent, frontMatter } = useConfig(); + const desc = + frontMatter.description || + ; + const resolvedTitle = titleContent + ? `${titleContent} - Authzed Docs` + : "Authzed Docs"; + + + + + */ + + return ( + + + + } + navbar={navbar} + footer={
+
+ {/* TODO: Add footer links here */} + + + +
+ +
+ © {new Date().getFullYear()} AuthZed. +
+
+ } + darkMode + docsRepositoryBase="https://github.com/authzed/docs/tree/main" + search={enableSearch && } + sidebar={{ + defaultMenuCollapseLevel: 1, + toggleButton: true, + }} + pageMap={pageMap} + feedback={{ + content: ( + + Something unclear? +
+ Create an issue → +
+ ), + }} + toc={{ backToTop: true, extraContent: }} + > + {children} +
+ + + ) +} diff --git a/components/banner.tsx b/components/banner.tsx index 07a2012..589ff1d 100644 --- a/components/banner.tsx +++ b/components/banner.tsx @@ -1,3 +1,5 @@ +"use client" + import { usePathname } from "next/navigation"; export default function Banner() { diff --git a/components/cta.tsx b/components/cta.tsx index 6701b3b..3be8392 100644 --- a/components/cta.tsx +++ b/components/cta.tsx @@ -1,3 +1,5 @@ +"use client" + import { Button } from "@/components/ui/button"; import { faPhone } from "@fortawesome/free-solid-svg-icons"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; diff --git a/components/footer.tsx b/components/footer.tsx index 9e10d84..8058093 100644 --- a/components/footer.tsx +++ b/components/footer.tsx @@ -1,5 +1,3 @@ -import Link from "next/link"; -import { LogoIcon } from "./logo"; import Scripts from "./scripts"; export default function Footer() { @@ -8,16 +6,6 @@ export default function Footer() {
-
- {/* TODO: Add footer links here */} - - - -
- -
- © {new Date().getFullYear()} AuthZed. -
diff --git a/components/icons/logo-icon.svg b/components/icons/logo-icon.svg new file mode 100644 index 0000000..80922e2 --- /dev/null +++ b/components/icons/logo-icon.svg @@ -0,0 +1,16 @@ + + + + + diff --git a/components/icons/logo.svg b/components/icons/logo.svg new file mode 100644 index 0000000..ca77c61 --- /dev/null +++ b/components/icons/logo.svg @@ -0,0 +1,67 @@ + + + + + + + + + + + + + diff --git a/components/logo.tsx b/components/logo.tsx deleted file mode 100644 index 6381c1d..0000000 --- a/components/logo.tsx +++ /dev/null @@ -1,92 +0,0 @@ -export function Logo() { - return ( - - - - - - - - - - - - - - ); -} - -export function LogoIcon() { - return ( - - - - - - ); -} diff --git a/components/nextra/ExternalIcon.tsx b/components/nextra/ExternalIcon.tsx deleted file mode 100644 index 753cc01..0000000 --- a/components/nextra/ExternalIcon.tsx +++ /dev/null @@ -1,21 +0,0 @@ -export default function ExternalIcon(props: { className?: string }) { - return ( - - - - - - ); -} diff --git a/components/nextra/Flexsearch.tsx b/components/nextra/Flexsearch.tsx deleted file mode 100644 index 326eda9..0000000 --- a/components/nextra/Flexsearch.tsx +++ /dev/null @@ -1,408 +0,0 @@ -// Forked from https://github.com/shuding/nextra/blob/7c8c4989021cb556a2f2f9e72b814efa311d7c2b/packages/nextra-theme-docs/src/components/flexsearch.tsx -// MIT License - -// Copyright (c) 2020 Shu Ding - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -import cn from "clsx"; -// flexsearch types are incorrect, they were overwritten in tsconfig.json -import { Document } from "flexsearch"; -import { useRouter } from "next/router"; -import type { SearchData } from "nextra"; -import type { ReactElement, ReactNode } from "react"; -import { useCallback, useState } from "react"; -import ExternalIcon from "./ExternalIcon"; -import { HighlightMatches } from "./HighlightMatches"; -import { Search } from "./Search"; -import { SearchResult } from "./types"; - -// Diff: Inlined definitions -export const DEFAULT_LOCALE = "en-US"; - -type SectionIndex = { - id: string; - url: string; - title: string; - pageId: string; - content: string; - display?: string; -}; - -type PageIndex = { - id: number; - title: string; - content: string; -}; - -// Diff: Additional index for blog posts -type BlogIndex = { - id: number; - title: string; - content: string; - url: string; - summary: string; -}; - -type Result = { - _page_rk: number; - _section_rk: number; - route: string; - prefix: ReactNode; - children: ReactNode; -}; - -// This can be global for better caching. -const indexes: { - // tuple is PageIndex, SectionIndex - [locale: string]: [Document, Document]; -} = {}; - -// Diff: Index for blog posts -// Associated type is BlogIndex -const blogIndex = new Document({ - cache: 100, - tokenize: "forward", - document: { - id: "id", - index: "content", - store: ["title", "url", "summary"], - }, -}); - -// Caches promises that load the index -const loadIndexesPromises = new Map>(); -const loadIndexes = (basePath: string, locale: string): Promise => { - const key = basePath + "@" + locale; - if (loadIndexesPromises.has(key)) { - return loadIndexesPromises.get(key)!; - } - const promise = loadIndexesImpl(basePath, locale); - loadIndexesPromises.set(key, promise); - return promise; -}; - -// Diff: Function for loading blog posts -const loadBlogData = async (basePath: string | undefined) => { - const response = await fetch(`${basePath ?? ""}/feed.json`, { - cache: "force-cache", - }); - const content = await response.json(); - - return content.items.map((item, i) => { - return { - id: i, - title: item.title, - content: item["content_html"], - url: item.url, - summary: item.summary, - }; - }); -}; - -const loadIndexesImpl = async ( - basePath: string, - locale: string, -): Promise => { - const response = await fetch( - `${basePath}/_next/static/chunks/nextra-data-${locale}.json`, - ); - const searchData = (await response.json()) as SearchData; - // Diff: Load blog data - const blogData = await loadBlogData(basePath); - - // Associated type is PageIndex - const pageIndex = new Document({ - cache: 100, - tokenize: "full", - document: { - id: "id", - index: "content", - store: ["title"], - }, - context: { - resolution: 9, - depth: 2, - bidirectional: true, - }, - }); - - // Associated type is SectionIndex - const sectionIndex = new Document({ - cache: 100, - tokenize: "full", - document: { - id: "id", - index: "content", - tag: "pageId", - store: ["title", "content", "url", "display"], - }, - context: { - resolution: 9, - depth: 2, - bidirectional: true, - }, - }); - - let pageId = 0; - - for (const [route, structurizedData] of Object.entries(searchData)) { - let pageContent = ""; - ++pageId; - - for (const [key, content] of Object.entries(structurizedData.data)) { - const [headingId, headingValue] = key.split("#"); - const url = route + (headingId ? "#" + headingId : ""); - const title = headingValue || structurizedData.title; - const paragraphs = content.split("\n"); - - sectionIndex.add({ - id: url, - url, - title, - pageId: `page_${pageId}`, - content: title, - ...(paragraphs[0] && { display: paragraphs[0] }), - }); - - for (let i = 0; i < paragraphs.length; i++) { - sectionIndex.add({ - id: `${url}_${i}`, - url, - title, - pageId: `page_${pageId}`, - content: paragraphs[i], - }); - } - - // Add the page itself. - pageContent += ` ${title} ${content}`; - } - - pageIndex.add({ - id: pageId, - title: structurizedData.title, - content: pageContent, - }); - } - - // Diff: Add posts to index - blogData.map((post) => { - blogIndex.add(post); - }); - - indexes[locale] = [pageIndex, sectionIndex]; -}; - -export function Flexsearch({ - className, -}: { - className?: string; -}): ReactElement { - const { locale = DEFAULT_LOCALE, basePath } = useRouter(); - const [loading, setLoading] = useState(false); - const [error, setError] = useState(false); - const [results, setResults] = useState([]); - const [search, setSearch] = useState(""); - - const doSearch = (search: string) => { - if (!search) return; - const [pageIndex, sectionIndex] = indexes[locale]; - - // Show the results for the top 5 pages - const pageResults = - pageIndex.search(search, 5, { - enrich: true, - suggest: true, - })[0]?.result || []; - - const results: Result[] = []; - const pageTitleMatches: Record = {}; - - // Diff: Actually limit page results to 3 - for (let i = 0; i < Math.min(pageResults.length, 3); i++) { - const result = pageResults[i]; - pageTitleMatches[i] = 0; - - // Show the top 5 results for each page - const sectionResults = - sectionIndex.search(search, 5, { - enrich: true, - suggest: true, - tag: `page_${result.id}`, - })[0]?.result || []; - - let isFirstItemOfPage = true; - const occurred: Record = {}; - - for (let j = 0; j < sectionResults.length; j++) { - const { doc } = sectionResults[j]; - const isMatchingTitle = doc.display !== undefined; - if (isMatchingTitle) { - pageTitleMatches[i]++; - } - const { url, title } = doc; - const content = doc.display || doc.content; - if (occurred[url + "@" + content]) continue; - occurred[url + "@" + content] = true; - results.push({ - _page_rk: i, - _section_rk: j, - route: url, - prefix: isFirstItemOfPage && ( -
- {result.doc.title} -
- ), - children: ( - <> -
- -
- {content && ( -
- -
- )} - - ), - }); - isFirstItemOfPage = false; - } - } - - // Diff: Adjust result sorting - const pageCounts = new Map(); - const docsSorted = results - .sort((a, b) => { - // Sort by number of matches in the title. - if (a._page_rk === b._page_rk) { - return a._section_rk - b._section_rk; - } - if (pageTitleMatches[a._page_rk] !== pageTitleMatches[b._page_rk]) { - return pageTitleMatches[b._page_rk] - pageTitleMatches[a._page_rk]; - } - return a._page_rk - b._page_rk; - }) - .filter((result) => { - const sectionCount = (pageCounts.get(result._page_rk) ?? 0) + 1; - pageCounts.set(result._page_rk, sectionCount); - // Limit section results to 3 - return sectionCount <= 3; - }, []) - .map((res) => ({ - id: `${res._page_rk}_${res._section_rk}`, - route: res.route, - prefix: res.prefix, - children: res.children, - })); - - const blogResults = - blogIndex.search(search, 5, { - enrich: true, - suggest: true, - })[0]?.result || []; - - // Diff: Include blog results - blogResults.map((item, i) => { - // Limit blog results to 3 - if (i >= 3) return; - - docsSorted.push({ - id: `${item.id}`, - route: item.doc.url, - prefix: ( -
- AuthZed Blog -
- ), - children: ( - <> -
- -
- {item.doc.summary && ( -
- -
- )} - - ), - }); - }); - - setResults(docsSorted); - }; - - const preload = useCallback( - async (active: boolean) => { - if (active && !indexes[locale]) { - setLoading(true); - try { - await loadIndexes(basePath, locale); - } catch (e) { - setError(true); - } - setLoading(false); - } - }, - [locale, basePath], - ); - - const handleChange = async (value: string) => { - setSearch(value); - if (loading) { - return; - } - if (!indexes[locale]) { - setLoading(true); - try { - await loadIndexes(basePath, locale); - } catch (e) { - setError(true); - } - setLoading(false); - } - doSearch(value); - }; - - return ( - - ); -} diff --git a/components/nextra/HighlightMatches.tsx b/components/nextra/HighlightMatches.tsx deleted file mode 100644 index 298fbc4..0000000 --- a/components/nextra/HighlightMatches.tsx +++ /dev/null @@ -1,71 +0,0 @@ -// Forked from https://github.com/shuding/nextra/blob/2e78fe5f52a523399eb491fe525b67c7534f2f0e/packages/nextra-theme-docs/src/components/highlight-matches.tsx -// MIT License - -// Copyright (c) 2020 Shu Ding - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -import escapeStringRegexp from "escape-string-regexp"; -import type { ReactElement, ReactNode } from "react"; -import { memo } from "react"; - -type MatchArgs = { - value?: string; - match: string; -}; - -export const HighlightMatches = memo(function HighlightMatches({ - value, - match, -}: MatchArgs): ReactElement | null { - if (!value) { - return null; - } - const splitText = value.split(""); - const escapedSearch = escapeStringRegexp(match.trim()); - const regexp = new RegExp(escapedSearch.replaceAll(/\s+/g, "|"), "ig"); - let result; - let index = 0; - const content: (string | ReactNode)[] = []; - - while ((result = regexp.exec(value))) { - if (result.index === regexp.lastIndex) { - regexp.lastIndex++; - } else { - const before = splitText.splice(0, result.index - index).join(""); - const after = splitText - .splice(0, regexp.lastIndex - result.index) - .join(""); - content.push( - before, - - {after} - , - ); - index = regexp.lastIndex; - } - } - - return ( - <> - {content} - {splitText.join("")} - - ); -}); diff --git a/components/nextra/Input.tsx b/components/nextra/Input.tsx deleted file mode 100644 index 203e016..0000000 --- a/components/nextra/Input.tsx +++ /dev/null @@ -1,52 +0,0 @@ -// Forked from https://github.com/shuding/nextra/blob/2e78fe5f52a523399eb491fe525b67c7534f2f0e/packages/nextra-theme-docs/src/components/input.tsx -// MIT License - -// Copyright (c) 2020 Shu Ding - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -import cn from "clsx"; -import type { ComponentProps, ReactNode } from "react"; -import { forwardRef } from "react"; - -type InputProps = ComponentProps<"input"> & { suffix?: ReactNode }; - -export const Input = forwardRef( - ({ className, suffix, ...props }, forwardedRef) => ( -
- - {suffix} -
- ), -); - -Input.displayName = "Input"; diff --git a/components/nextra/Search.tsx b/components/nextra/Search.tsx deleted file mode 100644 index b1f38c1..0000000 --- a/components/nextra/Search.tsx +++ /dev/null @@ -1,324 +0,0 @@ -// Forked from https://github.com/shuding/nextra/blob/2e78fe5f52a523399eb491fe525b67c7534f2f0e/packages/nextra-theme-docs/src/components/search.tsx -// MIT License - -// Copyright (c) 2020 Shu Ding - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -import { Transition } from "@headlessui/react"; -import cn from "clsx"; -import { useRouter } from "next/router"; -import { Link } from "nextra-theme-docs"; -import { useMounted } from "nextra/hooks"; -import { InformationCircleIcon, SpinnerIcon } from "nextra/icons"; -import { usePostHog } from "posthog-js/react"; -import type { CompositionEvent, KeyboardEvent, ReactElement } from "react"; -import { Fragment, useCallback, useEffect, useRef, useState } from "react"; -import { Input } from "./Input"; -import { SearchResult } from "./types"; - -type SearchProps = { - className?: string; - overlayClassName?: string; - value: string; - onChange: (newValue: string) => void; - onActive?: (active: boolean) => void; - loading?: boolean; - error?: boolean; - results: SearchResult[]; -}; - -const INPUTS = ["input", "select", "button", "textarea"]; - -export function Search({ - className, - overlayClassName, - value, - onChange, - onActive, - loading, - error, - results, -}: SearchProps): ReactElement { - const [show, setShow] = useState(false); - const [active, setActive] = useState(0); - const router = useRouter(); - // const { setMenu } = useMenu(); - const input = useRef(null); - const ulRef = useRef(null); - const [focused, setFocused] = useState(false); - // Trigger the search after the Input is complete for languages like Chinese - const [composition, setComposition] = useState(true); - const posthog = usePostHog(); - - useEffect(() => { - setActive(0); - }, [value]); - - useEffect(() => { - const down = (e: globalThis.KeyboardEvent): void => { - const activeElement = document.activeElement as HTMLElement; - const tagName = activeElement?.tagName.toLowerCase(); - if ( - !input.current || - !tagName || - INPUTS.includes(tagName) || - activeElement?.isContentEditable - ) - return; - if ( - e.key === "/" || - (e.key === "k" && - (e.metaKey /* for Mac */ || /* for non-Mac */ e.ctrlKey)) - ) { - e.preventDefault(); - // prevent to scroll to top - input.current.focus({ preventScroll: true }); - } else if (e.key === "Escape") { - setShow(false); - input.current.blur(); - } - }; - - window.addEventListener("keydown", down); - return () => { - window.removeEventListener("keydown", down); - }; - }, []); - - const finishSearch = useCallback(() => { - posthog?.capture("search", { query: input.current.value }); - input.current?.blur(); - onChange(""); - setShow(false); - // setMenu(false); - // }, [onChange, setMenu]); - }, [onChange]); - - const handleActive = useCallback( - (e: { currentTarget: { dataset: DOMStringMap } }) => { - const { index } = e.currentTarget.dataset; - setActive(Number(index)); - }, - [], - ); - - const handleKeyDown = useCallback( - function (e: KeyboardEvent) { - switch (e.key) { - case "ArrowDown": { - if (active + 1 < results.length) { - const el = ulRef.current?.querySelector( - `li:nth-of-type(${active + 2}) > a`, - ); - if (el) { - e.preventDefault(); - handleActive({ currentTarget: el }); - el.focus(); - } - } - break; - } - case "ArrowUp": { - if (active - 1 >= 0) { - const el = ulRef.current?.querySelector( - `li:nth-of-type(${active}) > a`, - ); - if (el) { - e.preventDefault(); - handleActive({ currentTarget: el }); - el.focus(); - } - } - break; - } - case "Enter": { - const result = results[active]; - if (result && composition) { - void router.push(result.route); - finishSearch(); - } - break; - } - case "Escape": { - setShow(false); - input.current?.blur(); - break; - } - } - }, - [active, results, router, finishSearch, handleActive, composition], - ); - - const mounted = useMounted(); - const renderList = show && Boolean(value); - - const icon = ( - - { - onChange(""); - }} - > - {value && focused - ? "ESC" - : mounted && - (navigator.userAgent.includes("Macintosh") ? ( - <> - K - - ) : ( - "CTRL K" - ))} - - - ); - const handleComposition = useCallback( - (e: CompositionEvent) => { - setComposition(e.type === "compositionend"); - }, - [], - ); - - return ( -
- {renderList && ( -
setShow(false)} - /> - )} - - { - const { value } = e.target; - onChange(value); - setShow(Boolean(value)); - }} - onFocus={() => { - onActive?.(true); - setFocused(true); - }} - onBlur={() => { - setFocused(false); - }} - onCompositionStart={handleComposition} - onCompositionEnd={handleComposition} - type="search" - placeholder="Search Documentation..." - onKeyDown={handleKeyDown} - suffix={icon} - /> - - -
    - {error ? ( - - - Error while searching. - - ) : loading ? ( - - - - - - Loading... - - - ) : results.length > 0 ? ( - results.map(({ route, prefix, children, id }, i) => ( - - {prefix} -
  • - - {children} - -
  • -
    - )) - ) : ( -
    No Results
    - )} -
-
-
- ); -} diff --git a/components/nextra/types.ts b/components/nextra/types.ts deleted file mode 100644 index ee2c944..0000000 --- a/components/nextra/types.ts +++ /dev/null @@ -1,31 +0,0 @@ -// Forked from https://github.com/shuding/nextra/blob/2e78fe5f52a523399eb491fe525b67c7534f2f0e/packages/nextra-theme-docs/src/types.ts -// MIT License - -// Copyright (c) 2020 Shu Ding - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -import { ReactNode } from "react"; - -export type SearchResult = { - children: ReactNode; - id: string; - prefix?: ReactNode; - route: string; -}; diff --git a/components/overview-page.tsx b/components/overview-page.tsx new file mode 100644 index 0000000..8c2d81e --- /dev/null +++ b/components/overview-page.tsx @@ -0,0 +1,38 @@ +// Adapted from the nextra docs site. + +import { useMDXComponents as getMDXComponents } from '@/mdx-components' +import type { PageMapItem } from 'nextra' +import { Cards } from 'nextra/components' +import { getIndexPageMap, getPageMap } from 'nextra/page-map' + +type Props = { + filePath: string + pageMap?: PageMapItem[] +} + +export const OverviewPage = async ({ filePath, pageMap: $pageMap }: Props) => { + const { h2: H2 } = getMDXComponents() + // NOTE: this is pretty hacky - it's reaching into nextra internals. a route looks like + // /docs/foo/bar/baz, and a filepath is the filepath to this content. + const currentRoute = filePath.replace('content', '/docs').replace('/index.mdx', '') + const pageMap = $pageMap ?? (await getPageMap(currentRoute)) + + return getIndexPageMap(pageMap).map((pageItem, index) => { + if (!Array.isArray(pageItem)) { + return

{pageItem.title}

+ } + return ( + + {pageItem.map(item => ( + + ))} + + ) + }) +} diff --git a/components/swagger.tsx b/components/swagger.tsx index 1f84486..4d2d853 100644 --- a/components/swagger.tsx +++ b/components/swagger.tsx @@ -1,3 +1,5 @@ +"use client" + import dynamic from "next/dynamic"; import type { SwaggerUIProps } from "swagger-ui-react"; @@ -11,7 +13,7 @@ const SwaggerUI = dynamic( import "swagger-ui-react/swagger-ui.css"; -export function Swagger(props: {}) { +export function Swagger() { return ( + + + + + + ); +} diff --git a/content/_meta.ts b/content/_meta.ts new file mode 100644 index 0000000..3946520 --- /dev/null +++ b/content/_meta.ts @@ -0,0 +1,24 @@ +import type { MetaRecord } from 'nextra' + +export default { + index: { + title: "Documentation", + display: "hidden", + }, + spicedb: { + title: "SpiceDB Documentation", + type: "page", + }, + authzed: { + title: "AuthZed Product Documentation", + type: "page", + }, + "best-practices": { + title: "Best Practices", + type: "page", + }, + mcp: { + title: "MCP", + type: "page", + }, +} satisfies MetaRecord; diff --git a/content/authzed/_meta.ts b/content/authzed/_meta.ts new file mode 100644 index 0000000..624fff5 --- /dev/null +++ b/content/authzed/_meta.ts @@ -0,0 +1,8 @@ +import type { MetaRecord } from 'nextra' + +export default { + guides: "Guides", + concepts: "Concepts", + links: "Links", + api: "API Reference", +} satisfies MetaRecord; diff --git a/content/authzed/api/_meta.ts b/content/authzed/api/_meta.ts new file mode 100644 index 0000000..c026257 --- /dev/null +++ b/content/authzed/api/_meta.ts @@ -0,0 +1,15 @@ +import type { MetaRecord } from 'nextra' + +export default { + "grpc-api": { + title: "gRPC API Reference", + href: "https://buf.build/authzed/api/docs/main:authzed.api.v1", + }, + "http-api": { + title: "HTTP API Reference", + }, + "cloud-api": { + title: "Cloud API Reference", + href: "https://www.postman.com/authzed/spicedb/collection/5fm402n/authzed-cloud-api", + }, +} satisfies MetaRecord; diff --git a/content/authzed/api/http-api.mdx b/content/authzed/api/http-api.mdx new file mode 100644 index 0000000..5304c61 --- /dev/null +++ b/content/authzed/api/http-api.mdx @@ -0,0 +1,5 @@ +import { Swagger } from "../../../components/swagger"; + +# HTTP API Documentation + + diff --git a/content/authzed/concepts/audit-logging.mdx b/content/authzed/concepts/audit-logging.mdx new file mode 100644 index 0000000..b04af07 --- /dev/null +++ b/content/authzed/concepts/audit-logging.mdx @@ -0,0 +1,199 @@ +import { Callout, Tabs } from "nextra/components"; + +# Audit Logging + +Audit Logging is functionality exclusive to AuthZed products that publishes logs of SpiceDB API operations to a log sink. + +## Log Format + +Logs contain the full details related to a request including: + +- API Token hash +- Request Method +- Request Body +- Request IP +- Response Body +- Errors (if any) + +### Example + +#### CheckPermission + +```json +{ + "specversion": "1.0", + "id": "819b4d52db4797491e31d0228f381543", + "source": "/ps/dev-ps/rc/us-east-1/p/dev-ps-abcd1234", + "type": "/authzed.api.v1.PermissionsService/CheckPermission", + "datacontenttype": "application/json", + "time": "2023-12-18T17:33:11.783093248Z", + "data": { + "request": { + "@type": "type.googleapis.com/authzed.api.v1.CheckPermissionRequest", + "consistency": { + "minimizeLatency": true + }, + "resource": { + "objectType": "resource", + "objectId": "firstdoc", + "permission": "read" + }, + "subject": { + "object": { + "objectType": "user", + "objectId": "tom" + } + }, + "response": { + "@type": "type.googleapis.com/authzed.api.v1.CheckPermissionResponse", + "checkedAt": { + "token": "GgoKCENKcmt4QTA9" + }, + "permissionship": "PERMISSIONSHIP_HAS_PERMISSION" + }, + "metadata": { + "token_hash": "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", + "user-agent": "grpc-go/1.58.3", + "x-request-id": "819b4d52db4797491e31d0228f381543" + } + } + } +} +``` + +#### ReadSchema + +```json +{ + "specversion": "1.0", + "id": "35cdd6662882bd387292ef78a650d18b", + "source": "/ps/dev-ps/rc/us-east-1/p/dev-ps-abcd1234", + "type": "/authzed.api.v1.SchemaService/ReadSchema", + "datacontenttype": "application/json", + "time": "2023-12-18T17:32:47.234247Z", + "data": { + "request": { + "@type": "type.googleapis.com/authzed.api.v1.ReadSchemaRequest" + }, + "response": { + "@type": "type.googleapis.com/authzed.api.v1.ReadSchemaResponse", + "schemaText": "definition folder {\n\trelation reader: user | service\n\tpermission read = reader\n}\n\ndefinition resource {\n\trelation reader: user | service\n\tpermission read = reader\n}\n\ndefinition service {}\n\ndefinition user {}", + "readAt": { + "token": "GhUKEzE3MDI5MjA0MjcxMjM2MDIwMDA=" + } + }, + "metadata": { + "token_hash": "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", + "user-agent": "grpc-go/1.58.3", + "x-request-id": "35cdd6662882bd387292ef78a650d18b" + } + } +} +``` + +## Log Sinks + +Log Sinks are the targets where logs will be shipped in order to be persisted. + + + **Info:** + We're exploring additional Log Sinks. + +Please reach out to your success team with any requests. + + + +### AWS Kinesis and Kinesis Firehose + +As a prerequisite to use [Kinesis] or [Kinesis Firehose] as a log sink, an IAM role must exist in the AWS account +with the necessary permissions to write to the Kinesis stream or Firehose delivery stream. + +This is an example policy that grants the necessary permissions to write to a Firehose delivery stream: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": ["firehose:PutRecord"], + "Resource": "EXAMPLE_FIREHOSE_ARN" + } + ] +} +``` + +[Kinesis]: https://aws.amazon.com/kinesis +[Kinesis Firehose]: https://docs.aws.amazon.com/firehose/latest/dev/what-is-this-service.html + +#### Dedicated + +For Dedicated customers, this role will also need a trust policy which allows the role to be assumed by the AuthZed account +in order to deliver logs to the stream. + +To find the Dedicated AWS account ID, navigate to the Permission System's settings page, find the Audit Log settings, +and choose "AWS Kinesis" or "AWS Kinesis Firehose" as the log sink. +The account ID will be displayed in the configuration. + +This is an example trust policy that allows the AuthZed account to assume the role: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "EXAMPLE_AUTHZED_ACCOUNT_ID" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalId": "EXAMPLE_EXTERNAL_ID" + } + } + } + ] +} +``` + +## Configuration + +The process for setting up audit logging varies depending on the AuthZed product you're using. + +### Dedicated & Cloud + +Using the web dashboard, navigate to the Permission System's settings page to find the Audit Log settings. + +### Self-Hosted + +Use the following command-line flags: + +| Flag | Description | Default | +| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| --extender-enabled | must be set to`authzed-audit` | | +| --extender-audit-batch-size-limit | defines the maximum number of audit events to be processed as a unit | `10000` | +| --extender-audit-buffer-size | defines the size of the audit log buffer that holds events to be processed by workers | `1000000` | +| --extender-audit-buffer-window | defines maximum amount of time events are buffered before being pushed | `1s` | +| --extender-audit-disabled-on-methods strings | list of comma-separated, fully-qualified API methods to disable events for. Watch API is always excluded (e.g. `/authzed.api.v1.PermissionsService/CheckPermission`) | | +| --extender-audit-initial-retry-interval duration | sets the first retry backoff in case of a failure to push audit events to the backend | `1s` | +| --extender-audit-max-retry-interval duration | sets the maximum backoff duration in case of failure to push events | `30s` | +| --extender-audit-retry-randomizer-factor | sets the randomization factor for the backoff duration - this helps prevent thundering herds on event push errors | `0.5` | +| --extender-audit-stream-name | defines the name of the target stream/topic (e.g. Kafka Topic, Kinesis Stream...) | `spicedb` | +| --extender-audit-target-configuration | target-type specific configuration | `[]` | +| --extender-audit-target-endpoint-url string | defines the URL of target endpoint to ingest audit events. If left unspecified, some types will try to determine automatically (e.g. AWS SDK) | | +| --extender-audit-target-type | defines the type of target to ingest audit events | `noop` | +| --extender-audit-worker-count | defines the number of worker goroutines to process audit events (default 5) | | + +### Tokens in Audit Logs + +To identify which token was used for a request, look up its SHA-256 hash in the audit log's metadata. +Each request's `token_hash` field contains the hashed value of the original token used. + +Example of generating a SHA-256 hash: + +```bash +$ python3 -c "import hashlib; print(hashlib.sha256(''.encode()).hexdigest())" +> 2ffb5caf16962e4371bc036d8de2a99dfcea1ae70091ef2953d633a88d05321a +``` diff --git a/content/authzed/concepts/authzed-materialize.mdx b/content/authzed/concepts/authzed-materialize.mdx new file mode 100644 index 0000000..4155a44 --- /dev/null +++ b/content/authzed/concepts/authzed-materialize.mdx @@ -0,0 +1,612 @@ +import { Callout } from "nextra/components"; + +# AuthZed Materialize + + + AuthZed Materialize is available to users of AuthZed [Dedicated] as part of an + early access program. Don't hesitate to get in touch with your AuthZed account + team if you would like to participate. + + +AuthZed Materialize takes inspiration from the Leopard index component described in the [Zanzibar paper](https://zanzibar.tech/2IoYDUFMAE:0:T). +Much like the concept of a materialized view in relational databases, AuthZed Materialize is a service that you configure with a list of permissions that you want it to precompute, and it will calculate how those permissions change after relationships +are written (specifically, when those relationships affect a subject's membership in a permission set or a set’s permission on a specific resource), or when a new schema is written. +These precomputed permissions can then be used either to provide faster checks and lookups through Accelerated Queries, or streamed to your own application database to do operations like searching, sorting, and filtering much more efficiently. + +In summary, AuthZed Materialize allows you to: + +- Speed up `CheckPermission` and `CheckBulkPermissions`. +- Speed up `LookupResources` and `LookupSubjects`, especially when there is a large number of resources. +- Build authorization-aware UIs, e.g. by providing a filtered and/or sorted list of more than several thousand authorized objects. +- Perform ACL filtering in other secondary indexes, like a search index (e.g. Elasticsearch). + +[Dedicated]: ../guides/picking-a-product#dedicated + +## Limitations + +- Your schema can contain any of the following, but they cannot be on the path of your configured Materialize permissions or it will throw an error: + - [Caveats] + - [Wildcard] subject types + - [.all intersections] + +- [Expiring relationships] aren't supported. +- Materialize takes time to compute the denormalized relationship updates, so if you are streaming the changes to your database, your application must be able to tolerate some lag. + +[Caveats]: https://authzed.com/docs/spicedb/concepts/caveats +[Wildcard]: https://authzed.com/docs/spicedb/concepts/schema#wildcards +[.all intersections]: https://authzed.com/docs/spicedb/concepts/schema#all-intersection-arrow +[expiring relationships]: https://authzed.com/docs/spicedb/concepts/expiring-relationships +[Dedicated]: ../guides/picking-a-product#dedicated + +## Client SDK + +All SpiceDB SDKs have the generated gRPC and protobuf code + +- [authzed-go v0.15.0](https://github.com/authzed/authzed-go/releases/tag/v0.15.0) +- [authzed-java 0.10.0](https://github.com/authzed/authzed-java/releases/tag/0.10.0) +- [authzed-py v0.17.0](https://github.com/authzed/authzed-py/releases/tag/v0.17.0) +- [authzed-rb v0.11.0](https://github.com/authzed/authzed-rb/releases/tag/v0.11.0) +- [authzed-node v0.17.0](https://github.com/authzed/authzed-node/releases/tag/v0.17.0) + +AuthZed Materialize's gRPC API definition is available from [API version 1.35](https://buf.build/authzed/api/docs/v1.35.0:authzed.api.materialize.v0) + +## Recommended Architecture + +### Consuming Client + +![authzed-materialize](/images/authzed-materialize.png) + +Customers will need to build a client to act as an "event processor" that consumes permission updates and writes those updates to a datastore like Postgres. +The consumer should be designed with resumability in mind by keeping track of the last revision consumed, just as any other stream processor. + +### Durability + +Every SpiceDB permission update will come with a `ZedToken`. +The consumer must keep track of that revision token to be able to resume the change stream from the last event consumed when a failure happens, like stream disconnection, consumer restart, or server-side restarts. + +When a consumer failure happens, the process should determine the last revision `ZedToken` consumed, and send that alongside your request. +The consumer should be coded with idempotency in mind in the event of such failures, meaning it should be prepared to process stream messages that have already been processed. + +Storing the revision `ZedToken` in the same database where the computed permissions are being stored is a good practice as it enables storing those transactionally, which gives you the guarantee that whatever revision the consumer restarts from, won’t cause events to be skipped, which would lead to an inconsistent state of the world. + +There may be scenarios where a revision has so many changes that storing transactionally can degrade the performance/availability of the target database. +In situations like these, one may want to store the events in batches, and in such cases, the revision should only be stored when the consumer determines the last batch has been processed. +If a failure happened in between those batches, the consumer will be able to restart processing from the start of the revision and idempotently overwrite whatever events were already in place. + + + Change events are stored up to 24h to make sure Materialize storage does not + grow unbounded and affect its performance. + + +## Configuration + +Just as with relational database materialized views, you need to provide Materialize with the "queries" you’d like it to pre-compute. +The configuration is described as a list of `resource#permission@subject` tuples. +Example: + +```zed +resource#view@user +resource#edit@user +``` + + + During early access provisioning, Materialize instances are not self-service, + so you’ll need to provide the permissions to be computed by Materialize + directly to your AuthZed account team. + + +### Relational Database + +You can find a runnable version of these examples [here](https://dbfiddle.uk/dX10Cu3Z). + +These are tables you likely already have in your database + +1. something representing the user +2. something representing the object we want to filter + +```sql +CREATE TABLE users ( + id varchar(100) PRIMARY KEY, + name varchar(40) +); +CREATE TABLE documents ( + id varchar(100) PRIMARY KEY, + name varchar(40), + contents_bucket varchar(100) +); +``` + +The `member_to_set` and `set_to_set` tables below are just used to track data from [LookupPermissionSets] and [WatchPermissionSets], all you need to do is store the fields directly from those APIs. + +```sql +CREATE TABLE member_to_set ( + member_type varchar(100), + member_id varchar(100), + member_relation varchar(100), + set_type varchar(100), + set_id varchar(100), + set_relation varchar(100) +); + +CREATE TABLE set_to_set ( + child_type varchar(100), + child_id varchar(100), + child_relation varchar(100), + parent_type varchar(100), + parent_id varchar(100), + parent_relation varchar(100) +); +``` + +Seed some base data; this would already exist in the application: + +```sql +INSERT INTO users (id, name) VALUES ('123', 'evan'), ('456', 'victor'); +INSERT INTO documents (id, name) VALUES ('123', 'evan secret doc'), ('456', 'victor shared doc'); +``` + +Sync data from [LookupPermissionSets]/[WatchPermissionSets]. +The APIs return type/id/relation name: + +```sql +INSERT INTO member_to_set (member_type, member_id, member_relation, set_type, set_id, set_relation) + VALUES ('user', '123', '', 'document', '123', 'view'), + ('user', '123', '', 'group', 'shared', 'member'), + ('user', '456', '', 'group', 'shared', 'member'); + +INSERT INTO set_to_set (child_type, child_id, child_relation, parent_type, parent_id, parent_relation) + VALUES ('group', 'shared', 'member', 'document', '456', 'view'); +``` + +To query, join the local application data with [LookupPermissionSets]/[WatchPermissionSets] data to filter by specific permissions. + +Find all documents `evan` can `view:` + +```sql +SELECT d.id FROM documents d + LEFT JOIN set_to_set s2s ON d.id = s2s.parent_id + INNER JOIN member_to_set m2s ON (m2s.set_id = s2s.child_id AND m2s.set_type = s2s.child_type AND m2s.set_relation = s2s.child_relation) OR (d.id = m2s.set_id ) + INNER JOIN users u ON u.id = m2s.member_id + WHERE + u.name = 'evan' AND + m2s.member_type = 'user' AND + m2s.member_relation = '' AND (( + s2s.parent_type = 'document' AND + s2s.parent_relation='view' + ) OR ( + m2s.set_type = 'document' AND + m2s.set_relation = 'view' + )); +``` + +| id | +| :-- | +| 123 | +| 456 | + +The same query, by changing only the username, will find all documents `victor` can `view`: + +```sql +SELECT d.id FROM documents d + LEFT JOIN set_to_set s2s ON d.id = s2s.parent_id + INNER JOIN member_to_set m2s ON (m2s.set_id = s2s.child_id AND m2s.set_type = s2s.child_type AND m2s.set_relation = s2s.child_relation) OR (d.id = m2s.set_id ) + INNER JOIN users u ON u.id = m2s.member_id + WHERE + u.name = 'victor' AND + m2s.member_type = 'user' AND + m2s.member_relation = '' AND (( + s2s.parent_type = 'document' AND + s2s.parent_relation='view' + ) OR ( + m2s.set_type = 'document' AND + m2s.set_relation = 'view' + )); +``` + +| id | +| :-- | +| 456 | + +The above example shows the most flexible way to do this: you can update your SpiceDB schema and sync new permission sets data without SQL schema changes but at the cost of more verbose SQL queries. + +If you know that you only care about `document#view@user,` then you can store the data more concisely and query more simply. +This strategy can also be used to shard the data coming from the Materialize APIs so that it does not all land in one table. + +Simplified permission sets storage (just for `document#view@user`): + +```sql +CREATE TABLE user_to_set ( + user_id varchar(100), + parent_set varchar(300) +); + +CREATE TABLE set_to_document_view ( + child_set varchar(300), + document_id varchar(100) +); +``` + +Storing from [LookupPermissionSets]/[WatchPermissionSets] in this model requires some simple transformations compared to the previous example: + +```sql +INSERT INTO user_to_set (user_id, parent_set) + VALUES ('123', 'document:123#view'), + ('123', 'group:shared#member'), + ('456', 'group:shared#member'); + +INSERT INTO set_to_document_view (child_set, document_id) + VALUES ('document:123#view', '123'), + ('group:shared#member', '456'); +``` + +Note that an extra entry (`document:123#view`, `123`) was added to simplify the join side (avoiding the `left join` in the previous example). +The queries are a bit simpler, though they can't be used to answer any permission check other than `document#view@user`. + +Find all documents `evan` can `view`: + +```sql +SELECT d.id FROM documents d + INNER JOIN set_to_document_view s2s ON d.id = s2s.document_id + INNER JOIN user_to_set m2s ON m2s.parent_set = s2s.child_set + INNER JOIN users u ON u.id = m2s.user_id + WHERE u.name = 'evan'; +``` + +| id | +| :-- | +| 123 | +| 456 | + +Find all documents `victor` can `view`: + +```sql +SELECT d.id FROM documents d + INNER JOIN set_to_document_view s2s ON d.id = s2s.document_id + INNER JOIN user_to_set m2s ON m2s.parent_set = s2s.child_set + INNER JOIN users u ON u.id = m2s.user_id + WHERE u.name = 'victor'; +``` + +| id | +| :-- | +| 456 | + +## API Specification + +### [WatchPermissionSets] + +This is an update stream of all the permissions Materialize is configured to watch. +You can use this to store all permissions tracked in the system closer to your application database to be used in database-native ACL filtering. +Permissions can also be stored in secondary indexes like Elasticsearch. + +The API consists of various event types that capture deltas that occurred since a client started listening. +It will also notify of events like a [breaking schema change] that necessitate rebuilding of the index. + +#### Request + +```json +{ + "optional_starting_after": "the_zed_token" +} +``` + +The `optional_starting_after` field in the request denotes the SpiceDB revision to start streaming changes. +It will start streaming from the revision right after the indicated one. +If no `optional_starting_after` is provided, Materialize will determine the latest revision at the moment of the request, and start streaming changes from there on. + +#### Response + +##### Revision Checkpoint Event + +Sent when changes happened in SpiceDB, but didn't affect Materialize. +Customers should keep track of this revision in their internal database to know where to resume from in the event of stream disconnection or stream consumer restart/failure. + +```json +{ + "completed_revision": { + "token": "GiAKHjE3MTUzMzkzMTAzODQ2NDMxNzguMDAwMDAwMDAwMA==" + } +} +``` + +##### Member Added To Set Event + +```json +{ + "change": { + "at_revision": { + "token": "GiAKHjE3MTUzMzkzMDg0MTY2NzUxNzcuMDAwMDAwMDAwMA==" + }, + "operation": "SET_OPERATION_ADDED", + "parent_set": { + "object_type": "thumper/resource", + "object_id": "seconddoc", + "permission_or_relation": "reader" + }, + "child_member": { + "object_type": "thumper/user", + "object_id": "fred", + "optional_permission_or_relation": "" + } + } +} +``` + +##### Member Removed From Set Event + +```json +{ + "change": { + "at_revision": { + "token": "GiAKHjE3MTUzMzkzMTAzODQ2NDMxNzguMDAwMDAwMDAwMA==" + }, + "operation": "SET_OPERATION_REMOVED", + "parent_set": { + "object_type": "thumper/resource", + "object_id": "seconddoc", + "permission_or_relation": "reader" + }, + "child_member": { + "object_type": "thumper/user", + "object_id": "fred", + "optional_permission_or_relation": "" + } + } +} +``` + +##### Set Added To Set Event + +```json +{ + "change": { + "at_revision": { + "token": "GiAKHjE3MTUzMzkzMDg0MTY2NzUxNzcuMDAwMDAwMDAwMA==" + }, + "operation": "SET_OPERATION_ADDED", + "parent_set": { + "object_type": "thumper/resource", + "object_id": "seconddoc", + "permission_or_relation": "reader" + }, + "child_set": { + "object_type": "thumper/team", + "object_id": "engineering", + "permission_or_relation": "members" + } + } +} +``` + +##### Set Removed From Set Event + +```json +{ + "change": { + "at_revision": { + "token": "GiAKHjE3MTUzMzkzMTAzODQ2NDMxNzguMDAwMDAwMDAwMA==" + }, + "operation": "SET_OPERATION_REMOVED", + "parent_set": { + "object_type": "thumper/resource", + "object_id": "seconddoc", + "permission_or_relation": "reader" + }, + "child_set": { + "object_type": "thumper/team", + "object_id": "engineering", + "permission_or_relation": "members" + } + } +} +``` + +##### [Breaking Schema Change] Event + +When the origin SpiceDB instance introduces a schema change that invalidates all currently computed permission sets, Materialize will issue a special event indicating this happened: + +```json +{ + "breaking_schema_change": { + "change_at": { + "token": "GiAKHjE3MTUzMzkzMTAzODQ2NDMxNzguMDAwMDAwMDAwMA==" + } + } +} +``` + +The event indicates the revision at which the schema change happened. + +When the client receives this event, all previously indexed permission sets are rendered stale, and the client must rebuild the index with a call to [LookupPermissionSets] at the revision the schema change was introduced. + +Not every change to the origin permission system schema is considered breaking. + +###### Detecting Breaking Schema Changes In Development Environment + +The AuthZed team has optimized Materialize to reduce the number of instances where a change is considered breaking and thus renders permission set stale. +To determine if a schema change is breaking, we provide the `materialize-cli` tool. + + + `materialize-cli` is still in early development, please reach out to us if you + want to try it as part of AuthZed Materialize early access. + + +#### Errors + +##### FailedPrecondition: Revision Does Not Exist + +Whenever the client receives a `FailedPrecondition`, they should retry with a backoff. +In this case, the client is asking for a revision that hasn’t been yet processed by Materialize. +You may receive this error when: + +- the Materialize instances are restarting and catching up with all changes that have happened since it took a snapshot of your SpiceDB instance. +- A [BreakingSchemaChange] was emitted, and by happenstance, your client had to reconnect. + The Materialize server hasn’t yet rebuilt a new snapshot of your SpiceDB instance with the new schema to serve new events. + +### [LookupPermissionSets] + +This API complements [WatchPermissionSets]. +When you first bring on a system that needs permissions data, [LookupPermissionSets] lets you create an initial snapshot of the permissions data, and then you can use the [WatchPermissionSets] API to keep the snapshot updated. + +The API is resumable via cursors, meaning that the client is responsible for specifying the cursor that denotes the permission set to start streaming from, and the number of them to be streamed. +If no cursor is provided, Materialize will start streaming from what it considers the first change. +The number of events to stream is required. + +The API also supports specifying an optional revision via the `optional_at_revision`, which indicates Materialize should start streaming events at a revision at least as fresh as the provided one. +The server will guarantee the revision selected is equal to or more recent than the requested revision. +This is useful when the client has been notified a [breaking schema change] occurred and that they should rebuild their indexes. +If both `optional_at_revision` and `optional_starting_after` are provided, the latter always takes precedence. + + + Client **must** provide the revision token after a [breaking schema change] + through `optional_starting_after`, otherwise Materialize will start streaming + permission sets for whatever snapshot revision is available at the moment, and + won't reflect the schema changes. + + +The current cursor is provided with each event in the stream, so if the consumer client crashes it knows where to restart from, alongside the revision at which the data is computed. +Once an event is received, the recommended course of action is to store the following as part of the same transaction in your database: + +- The data to insert into the permission sets table +- The cursor into a table denoting the current state of the backfill +- The current revision token into a table denoting the snapshot revision of the stored Materialize data + +In the event of the customer consumer being restarted, it should: + +- Select the current cursor from the backfill cursor table +- Issue a [LookupPermissionSets] request with `optional_starting_after` set to the stored cursor +- Resume ingestion as usual + + + While AuthZed treats correctness very seriously, bugs may be identified that + affect the correctness of the denormalized permissions computed by + Materialize. Those incidents should be rare, but consumers must have all the + machinery in place to re-index via [LookupPermissionSets] at any given time. + + +#### Reindexing After A Breaking Schema Change + +Another scenario for invoking [LookupPermissionSets] is after a [breaking schema change] written to the origin SpiceDB instance. +In this case, the index is rendered stale and a client must rebuild it by calling [LookupPermissionSets] at the revision the schema change was introduced. +During this period, the previously ingested permission sets data will be stale. +We are working on several options to minimize the lag caused and improve the developer experience: + +- On-Band: Stream breaking schema changes over [WatchPermissionSets] instead of requiring a [LookupPermissionSets] call. + This will reduce the amount of changes to stream and reduce the time to reindex. +- Off-Band: Support Staging Schemas in SpiceDB so that your application can call [LookupPermissionSets] over the staged schema changes + +These are the two recommended strategies to handle breaking schema events in your application: + +##### On-Band LookupPermissionSets Ingestion + +With on-band ingestion, your application reindexes all permission set data right after receiving a [breaking schema change]. +This will naturally lead to lag, but depending on the volume of data, your application may be able to withstand this. +The tradeoff here is development velocity versus lag. +If your application can't withstand lag during reindexing, please consider the off-band strategy. + +In this scenario, we recommend using the _versioned permission set tables strategy_: your application will keep track of various versions of the permission set. +One will be the currently ingested and being updated with [WatchPermissionSets], and the new version is the result of a [BreakingSchemaChange] and is ingested with [LookupPermissionSets] while the previous version of the permission sets are being served. +You should keep track of what is the current revision being served. + +##### Off-Band LookupPermissionSets Ingestion + +With an off-band ingestion strategy the client will avoid the lag by following a strategy similar to non-breaking relational database migrations: by transforming your schema following a four-phase migration. + +A new permission will be written to your SpiceDB schema that includes the changes, and it will be added to a new Materialize instance run in parallel to the current one, similar to a blue/green deployment. +You will be able to run [LookupPermissionSets] against the new instance to obtain all the permission sets plus the ones corresponding to the newly added permission. +Once your index is ingested and is updated with [WatchPermissionSets], your application should be able to switch to use the new permission and the old permission can be dropped from Materialize first, and then from your schema. + +This strategy requires more steps and careful planning, but in exchange completely avoids any lag. + + + For the time being, Materialize instances are not self-serve, so you'll need + to work with your Account Team to execute the off-band ingestion strategy. + + +#### Request + +```json +{ + "limit": "the_number_of_events_to_stream", + "optional_at_revision": "minimum revision to start streaming from", + "optional_starting_after": "continue stream from the specified cursor" +} +``` + +#### Response + +##### Permission Set Sent Over The Stream + +```json +{ + "change": { + "at_revision": { + "token": "GiAKHjE3MTUzMzk0Mzg2MjA4NzI1MDIuMDAwMDAwMDAwMA==" + }, + "operation": "SET_OPERATION_ADDED", + "parent_set": { + "object_type": "thumper/resource", + "object_id": "seconddoc", + "permission_or_relation": "reader" + }, + "child_member": { + "object_type": "thumper/user", + "object_id": "tom", + "optional_permission_or_relation": "" + } + }, + "cursor": { + "limit": 10000, + "token": { + "token": "GiAKHjE3MTUzMzk0Mzg2MjA4NzI1MDIuMDAwMDAwMDAwMA==" + }, + "starting_index": 1, + "completed_members": false + } +} +``` + +The payload comes with the permission set data to store in your database table, and the cursor that points to that permission set in case resumption is necessary. +The computed revision is also provided as part of the request via `at_revision` so that once the permission set is streamed, the consumer knows where to start streaming [WatchPermissionSets] from. + +The consumer should continue to stream permission sets indefinitely until it has not received further messages over the stream. +Please note that the server may return `EOF` to denote the stream is closed, but that does not mean there aren't more changes to serve. +The client **must** open a new stream with the last cursor, and continue streaming until an iteration of the stream yielded zero events. +At this point, the backfill is completed, and the consumer can start processing change events using [WatchPermissionSets], using the stored snapshot revision. + +#### Errors + +##### InvalidArgument: Cursor Limit Does Not Match Request Limit + +The limit specified in the request, and the limit specified in the initiating request that led to the currently provided cursor differ. +To solve this, make sure you use the same limit for the initiating request as for every subsequent request. +The limit is optional once you provide a cursor since it’s stored in it. + +##### FailedPrecondition: Snapshot Not Found For Revision, Try Again Later + +Whenever the client receives a `FailedPrecondition`, they should retry with a backoff. +In this case, the client is asking for a revision that hasn’t been yet processed by Materialize. +You may receive this error when your client calls [LookupPermissionSets] right after receiving [BreakingSchemaChange] through the WatchPermissionsSets API. +The client should retry with the same revision later on. + +##### Aborted: Requested Revision Is No Longer Available + +This error is returned when a new Materialize has deployed a new snapshot of the origin SpiceDB permission system. +This happens on a regular cadence and is part of Materialize's internal maintenance operations. +When this error is returned, it indicates the client should restart [LookupPermissionSets] afresh, dropping the cursor in `optional_starting_after`, and also dropping `optional_at_revision`. +Every previously stored data should also be discarded. +If the volume of data to ingest via [LookupPermissionSets] is large enough it takes many hours to consume, please get in touch with AuthZed support to tweak your instance accordingly. + +### Managing Client State + +This diagram shows the various states your client application will need to transition through when calling the [LookupPermissionSets] and the [WatchPermissionSets] APIs. + +![authzed-materialize](/images/materialize-client-state-diagram.png) + +[WatchPermissionSets]: https://buf.build/authzed/api/docs/v1.35.0:authzed.api.materialize.v0#authzed.api.materialize.v0.WatchPermissionSetsService.WatchPermissionSets +[LookupPermissionSets]: https://buf.build/authzed/api/docs/v1.35.0:authzed.api.materialize.v0#authzed.api.materialize.v0.WatchPermissionSetsService.LookupPermissionSets +[LookupResources]: https://buf.build/authzed/api/docs/v1.35.0:authzed.api.v1#authzed.api.v1.PermissionsService.LookupResources +[LookupSubjects]: https://buf.build/authzed/api/docs/v1.35.0:authzed.api.v1#authzed.api.v1.PermissionsService.LookupSubjects +[BreakingSchemaChange]: https://buf.build/authzed/api/docs/v1.35.0:authzed.api.materialize.v0#authzed.api.materialize.v0.BreakingSchemaChange +[Breaking Schema Change]: https://buf.build/authzed/api/docs/v1.35.0:authzed.api.materialize.v0#authzed.api.materialize.v0.BreakingSchemaChange +[breaking schema change]: https://buf.build/authzed/api/docs/v1.35.0:authzed.api.materialize.v0#authzed.api.materialize.v0.BreakingSchemaChange diff --git a/content/authzed/concepts/expedited-support.mdx b/content/authzed/concepts/expedited-support.mdx new file mode 100644 index 0000000..187d802 --- /dev/null +++ b/content/authzed/concepts/expedited-support.mdx @@ -0,0 +1,83 @@ +import { Callout } from "nextra/components"; + +# Expedited Support + +Expedited Support is an additional service sold by AuthZed to ensure that your workloads are operating with best practices for performance and reliability. + +AuthZed's products come with a default level of support with the ability to upgrade for an additional cost. + +## Paid Plans + +### Overview + +| | **Emergency** | **Silver**
(included with Dedicated) | **Gold** | +| ---------------------------------------------- | ------------- | ---------------------------------------------------- | ---------------------------- | +| **Schema Design** | Best effort | Unlimited | Unlimited | +| **Architecture Overview** | N/A | Unlimited | Unlimited | +| **Email Support Desk** | N/A | Included | Included | +| **Slack** | N/A | Included | Included | +| **Dedicated Account Team** | N/A | Included | Included | +| **Onboarding Calls** | N/A | Four weekly 30-minute calls | Eight weekly 60-minute calls | +| **Development Priority** | N/A | N/A | Included | +| **P1, P2 Response SLA** | Included | N/A | Included | +| **P1** (business critical outage) | 1 Hour | N/A | 1 Hour | +| **P2** (non-prod or partial outage) | 12 Hours | N/A | 12 Hours | +| **P3** (general questions) | N/A | 24 Hours | 24 Hours | + +### Gold + +Gold Support is recommended for those running critical, production SpiceDB workloads. + +Private communication channels are provided for critical support that are integrated with the AuthZed engineering on-call. + +Non-critical support is prioritized through [support.authzed.com][support] or [support@authzed.com][email]. + +[support]: https://support.authzed.com +[email]: mailto:support@authzed.com + +### Emergency + +For those who do not need all of the features of Gold Support, but do need response time SLAs for taking their services to production, we offer our Emergency plan. +This plan only includes those Response Time SLAs for critical and non-critical issues. + +Private communication channels are provided for critical support that are integrated with the AuthZed engineering on-call. + +### Silver Support + +Silver Support is the package best suited for those in pre-production or non-critical environments. + +Dedicated includes Silver support for no additional cost. + +All support is prioritized through [support.authzed.com][support] or [support@authzed.com][email]. + +## Free Plans + +### Basic Support + +Not everyone is a paying customer and that's okay. +In this case, AuthZed will do their best to support you or point you in the right direction for getting what you need to be successful. + +This support is done through [support.authzed.com][support] or [support@authzed.com][email]. + +### Community Support + +Community support is available to absolutely everyone and does not require any form of payment or usage of Authzed products. + +[Discord] is the first line of support for asking questions or getting help with SpiceDB. + +For bug reports, feature requests, or questions [GitHub issues] is also available. + +While AuthZed's products are proprietary, the foundation of these products will always be open source. +Many folks participating in these open source communities are AuthZed customers or employees that have relevant experience to help out. + + + **Warning:** + Help from the community is a volunteer effort. + +Be appreciative of any help you receive and always follow the [Code of Conduct]. + + + +[Discord]: https://authzed.com/discord +[GitHub issues]: https://github.com/authzed/spicedb/issues +[Code of Conduct]: https://github.com/authzed/spicedb/blob/main/CODE-OF-CONDUCT.md diff --git a/content/authzed/concepts/feature-maturity.mdx b/content/authzed/concepts/feature-maturity.mdx new file mode 100644 index 0000000..ac0b13a --- /dev/null +++ b/content/authzed/concepts/feature-maturity.mdx @@ -0,0 +1,31 @@ +# Feature Maturity + +In order to provide a balance between stability and the ability to adopt new functionality in its early stages, features developed by AuthZed are classified under one of three states of maturity. + +## Tech Preview + +Tech Preview features provide the earliest access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process. + +Because Tech Preview features are still under development, AuthZed cannot guarantee the stability of such features. +These features can change dramatically in design or be dropped altogether without ever reaching a stable release. +Because this phase of development is intended for collecting requirements, there are no timelines for future stable releases. + +Customers will be informed by their account teams about the availability of Tech Preview features that might be relevant to them. + +## Early Access + +Early Access features provide early access to upcoming product features that are stable but not necessarily fully featured or self-service. + +Because Early Access features are still under development, AuthZed can only make guarantees about the stability of such features on a case-by-case basis. +These features should not have fundamental changes during this time, but should expand functionality over time. + +Customers will be informed by their account teams about the availability of Early Access features that might be relevant to them. + +## General Availability + +General Availability features provide access to all users the ability to self-service the enablement and configuration of a fully supported feature with accordance to their respective Service Level Agreements (SLAs). + +Generally Available features have been operated in Early Access for a period of at least a month while meeting defined product-level Service Level Objectives (SLOs). +This provides an opportunity for our operations team to define and test all Standard Operating Procedures (SOPs) required. + +Customers should be able to find all Generally Available features within the AuthZed dashboard and documentation websites. diff --git a/content/authzed/concepts/management-dashboard.mdx b/content/authzed/concepts/management-dashboard.mdx new file mode 100644 index 0000000..03c2124 --- /dev/null +++ b/content/authzed/concepts/management-dashboard.mdx @@ -0,0 +1,9 @@ +# Management Dashboard + +The Management Dashboard is a web-based application for organizations to manage SpiceDB deployments. +The dashboard is primarily targeting platform operators that want to configure and monitor the service. + +The Management Dashboard is exclusive to AuthZed [Dedicated] and [Cloud]. + +[Dedicated]: ../guides/picking-a-product#dedicated +[Cloud]: ../guides/picking-a-product#cloud diff --git a/content/authzed/concepts/multi-region.mdx b/content/authzed/concepts/multi-region.mdx new file mode 100644 index 0000000..34fc43d --- /dev/null +++ b/content/authzed/concepts/multi-region.mdx @@ -0,0 +1,5 @@ +# Multi-Region Deployments + +Multi-Region Deployments are a collection of SpiceDB deployments that can be located in various regions across the world, but that all share a single [Datastore]. + +[Datastore]: ../../spicedb/concepts/datastores diff --git a/content/authzed/concepts/private-networking.mdx b/content/authzed/concepts/private-networking.mdx new file mode 100644 index 0000000..fb525a1 --- /dev/null +++ b/content/authzed/concepts/private-networking.mdx @@ -0,0 +1,21 @@ +import { Callout } from "nextra/components"; + +# Private Networking + +Private Networking is functionality exclusive to AuthZed Dedicated that restricts network access to internal networks. +When enabled, connections that are not configured are rejected. + +By adding this additional layer of security, entire classes of security risk are eliminated because only trusted networks have access to the software powering your authorization systems. + +In the scenario you choose not to use Private Networking, AuthZed Dedicated can alternatively be configured for access over the open internet. + + + Private networking is recommended, but optional. Authzed Dedicated can be + configured to allow for connecting from the public internet. + + +## Architecture + +The following diagram models Private Networking on AWS using AWS PrivateLink: + +![Diagram displaying PrivateLink connecting AWS accounts](/images/aws-dedicated-diagram.png) diff --git a/content/authzed/concepts/restricted-api-access.mdx b/content/authzed/concepts/restricted-api-access.mdx new file mode 100644 index 0000000..b8ca1e6 --- /dev/null +++ b/content/authzed/concepts/restricted-api-access.mdx @@ -0,0 +1,315 @@ +import { Callout, Tabs } from "nextra/components"; + +# Restricted API Access + +Restricted API Access is functionality exclusive to AuthZed products that restricts access to SpiceDB for API Tokens. + +This functionality enables organizations to apply the principle of least-privilege to services accessing SpiceDB. +For example, read-only tokens can be created for services that should never need to write to SpiceDB. + +Those familiar with configuring IAM on the major cloud providers should feel comfortable with the basic concepts: + +- Service Accounts +- Tokens +- Roles +- Policies + +## Components + +### Service Accounts + +Service Accounts represent your unique workloads. +We recommend creating a Service Account for each application that will access the SpiceDB API. + +By default Service Accounts have no access to the SpiceDB API; you must apply a Role to gain access. + +### Tokens + +Tokens are long-lived credentials for Service Accounts. +SpiceDB clients must provide a Token in the Authorization header of an API request to perform actions granted to the Service Account. + +Service Accounts can have an arbitrary number of Tokens. + + + We recommend deploying new Tokens before deprovisioning any old Tokens to + avoid downtime. + + +#### Token Format + + + The entire contents of a Token is considered secret. + + +Tokens come in the form of `{prefix}_{key}`. + +Here's what an example Restricted API Access Token looks like. + +``` +sdbst_h256_thisisnotaverysecuresecret +``` + +This is what you should forward in your API calls to your AuthZed Dedicated cluster. + +If you are using [static configuration] in your own SpiceDB Enterprise deployment, you'd need to +generate a token hash to be included in your configuration YAML. +Please note you should hash the cleartext secret without the prefix. +You can generate the hash of a secret as follows: + +```command +echo -n thisisnotaverysecuresecret | sha256sum +``` + +The command should output the hash, which can be referenced in your static configuration + +``` +71c73ba92f2032416b18a4f4fffb2a825755bea6a8430f2622ab1f3fb35a10d0 +``` + +### Roles + +Roles define rules for accessing the SpiceDB API. +Roles are bound to Service Accounts to apply those rules to all API Tokens representing the Service Account. + +Each rule is composed of an SpiceDB API method (e.g. `CheckPermissions`, `WriteRelationships`) and an optional [CEL expression][cel]. +Rules are evaluated at request-time and CEL expressions are provided the request payload in order to dynamically evaluate each request. + +Any public SpiceDB API type is available to the CEL expression so that you can traverse any type and its fields using language operators. +For more details on CEL's language definition, refer to [CEL language specification][cel-lang-spec]. + +The following variables are provided the CEL expression varying based on the request method: + +- `WriteRelationshipsRequest` +- `ReadRelationshipsRequest` +- `DeleteRelationshipsRequest` +- `WriteSchemaRequest` +- `ReadSchemaRequest` +- `CheckPermissionRequest` +- `LookupResourcesRequest` +- `LookupSubjectsRequest` +- `ExpandPermissionTreeRequest` +- `WatchRequest` + +[cel]: https://github.com/google/cel-spec +[cel-lang-spec]: https://github.com/google/cel-spec/blob/81e07d7cf76e7fc89b177bd0fdee8ba6d6604bf5/doc/langdef.md + +### Policies + +Policies are what bind Roles to a Service Account. + +Each policy is composed of a unique identifier for the policy itself, the principal (the target of the role assignment), and any roles being assigned. + +## Task-Specific Configuration + +### `zed backup`/`zed restore` + +To configure a service account for use with `zed backup` and `zed restore`, you'll need the following APIs: + +On a Service Account on the **source** PS: + +```yaml +## For backup +# Exporting relationships +authzed.api/ExportBulkRelationships +authzed.api/BulkExportRelationships + +# Dumping existing schema +authzed.api/ReadSchema +``` + +On a Service Account on the **destination** PS: + +```yaml +## For restore +## Put these on the DESTINATION PS +# Importing relationships +authzed.api/ImportBulkRelationships +authzed.api/BulkImportRelationships + +# Retrying failed relationships +authzed.api/WriteRelationships + +# Writing new schema +authzed.api/WriteSchema +``` + +## Example Rule CEL Expressions + +These are some examples of CEL expressions that you might attach to Permissions on a Role. + +### Resource-type Write Limit + +This CEL expression disables the ability for writes to occur on anything but the provided resource type. + +This is useful for limiting an application to only be able to perform writes to SpiceDB for the type objects that it owns. + +```cel +WriteRelationshipsRequest.updates.all(x, x.relationship.resource.object_type == "resource") +``` + +### Subject-type Write Limit + +This CEL expression disables the ability for writes to occur on anything but the provided subject type. + +```cel +WriteRelationshipsRequest.updates.all(x, x.relationship.subject.object.object_type == "user") +``` + +### Create-only Write Limit + +This CEL expression disables the ability for writes to perform updates; they can only create new relationships. + +```cel +WriteRelationshipsRequest.updates.all( + x, + x.operation == authzed.api.v1.RelationshipUpdate.Operation.OPERATION_CREATE, +) +``` + +### Resource-type Read Limit + +This CEL expression limits the ReadRelationships API from being able to list anything but the a specific resource type. + +```cel +ReadRelationshipsRequest.relationship_filter.resource_type == "resource" +``` + +### Blocking Schema Writes + +This CEL expression prevents any schema writes that contain the substring "blockchain". +This example could be extended to prevent PII or undesirable patterns from reaching a production schema. + +```cel +!WriteSchemaRequest.schema.contains("blockchain") +``` + +### Limit Checks to one Permission + +This CEL expression limits CheckPermissions requests to only be able to check a particular permission. + +```cel +CheckPermissionRequest.permission == "admin" +``` + +## Configuration + +The process for setting up this feature varies depending on the AuthZed product you're using. + +### Dedicated & Cloud + +Using the web dashboard, navigate to the Permission System's "Access" tab. + +### Self-Hosted + +Use the following command-line flags: + +| Flag | Description | Default | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| --extender-authzed-fgam-endpoint | defines the external SpiceDB endpoint used to authorize operations for the authzed-fgam extender. If a file:// endpoint is provided, server is run embedded with static configuration | | +| --extender-authzed-fgam-preshared-key | defines the external SpiceDB preshared key used to authorize operations for the authzed-fgam extender. Ignored if endpoint is local (file://) | | +| --extender-enabled | must be set to `authzed-fgam` | | + +If you set `--extender-authzed-fgam-endpoint` to a file, it must be a YAML configuration file. + + + This configuration file should be treated like a secret because it contains + token hashes. + + +Here's an example showcasing the structure of static configuration: + +```yaml +role: + - id: "admin" + permission: + authzed.v1/ReadSchema: "" + authzed.v1/WriteSchema: "" + authzed.v1/ReadRelationships: "" + authzed.v1/WriteRelationships: "" + authzed.v1/DeleteRelationships: "" + authzed.v1/CheckPermission: "" + authzed.v1/LookupResources: "" + authzed.v1/LookupSubjects: "" + authzed.v1/ExpandPermissionTree: "" + authzed.v1/Watch: "" +service_account: + - id: "my_microservice" + token: + - id: "token_01" + hash: "71c73ba92f2032416b18a4f4fffb2a825755bea6a8430f2622ab1f3fb35a10d0" + - id: "token_02" + hash: "fcdfc4fa3c5c7381789d90c3c67f6cebf151cbf7e7555e91e77be2aa3e0a4bdf" +policy: + - id: "microservice_with_admin" + principal_id: "my_microservice" + principal_type: "service_account" + roles: + - "admin" +``` + +## Enabling without downtime + +If you want to apply a configuration to an existing SpiceDB cluster without downtime, you must conduct an upgrade process with the following steps: + +1. Create pre-shared keys that follow the Restricted Access Token format for each client of your SpiceDB instance. + Using some Bash: + + ```sh + # Generate your secret (substitute your preferred method for generating a cryptographically-secure random string here) + # This will be a part of the token + SECRET="$(base64 < /dev/random | head -c64)"; echo "$SECRET" + # g2l2/YjC3jFg6FdV080qiqBPvCrlLuc9GcHutgHF4WhVjsg7+AvlqLmoCrJEC68t + + # Hash that secret using sha256sum + # This will go in your FGAM configuration as the token hash + # NOTE: truncate the trailing spaces and "-". You just want the alphanum characters. + HASH="$(echo -n "$SECRET" | sha256sum | cut -d" " -f1)"; echo "$HASH" + # 1d619ac2f5013845c5f2df93add92fc87e88ca6c57d19a77d1b189663f1ff5b0 + + # Add the prefix "sdbst_h256_" to create the token that you'll supply to your client + printf "token: sdbst_h256_%s\nhash: %s\n" "$SECRET" "$HASH" + # token: sdbst_h256_g2l2/YjC3jFg6FdV080qiqBPvCrlLuc9GcHutgHF4WhVjsg7+AvlqLmoCrJEC68t + # hash: 1d619ac2f5013845c5f2df93add92fc87e88ca6c57d19a77d1b189663f1ff5b0 + ``` + +2. Prepare the FGAM configuration YAML. + You'll add the hashes that you generated in the previous step to the `hash` key in the `token` list for each respective token. + This process heavily depends on what each client needs: + 1. You may want to start with FGAM tokens bound to a admin-like Role, since that's what the original PSKs effectively were. + This is probably lower risk, and then from there you can move to start trimming down permissions. + 2. Or you may want to move directly to downscoped tokens for your individual services, creating the tokens you need. + This may be simple if you have few clients, but more complex as the number of clients grow, and with a bigger blast radius of impact on rollout. + A minimal configuration would look something like: + + ```yaml + role: + - id: "admin" + permission: + authzed.v1/CheckPermission: "" + service_account: + - id: "my_microservice" + token: + - id: "token_01" + hash: "1d619ac2f5013845c5f2df93add92fc87e88ca6c57d19a77d1b189663f1ff5b0" + policy: + - id: "microservice_with_admin" + principal_id: "my_microservice" + principal_type: "service_account" + roles: + - "admin" + ``` + +3. Set the created tokens as valid preshared keys in your SpiceDB instance. + You can do this by defining multiple PSKs via the ENV or flags as comma separated values: + + ```sh + spicedb serve --grpc-preshared-key=",,...," + ``` + + Deploy SpiceDB with this new configuration. + +4. Update all your clients to use the new tokens that you've created, according to which token should have which permissions. +5. Deploy SpiceDB with the new Restricted Access configuration. + +Prior to the migration, the keys that your client sends will be treated as preshared keys. +After the migration, the keys that your client sends will be treated as Restricted Access keys. diff --git a/content/authzed/concepts/security-embargo.mdx b/content/authzed/concepts/security-embargo.mdx new file mode 100644 index 0000000..36f37e3 --- /dev/null +++ b/content/authzed/concepts/security-embargo.mdx @@ -0,0 +1,70 @@ +import { Callout } from "nextra/components"; + +# Security Embargo + +All AuthZed products operate under a security embargo program. + +You can find a listing of public vulnerabilities for SpiceDB on [GitHub], [NVD], and anywhere else that syndicates vulnerabilities published to the [MITRE CVE List]. + + + **Info:** + You can find AuthZed's Security Response Policy and other policies at [security.authzed.com][sec]. + +[sec]: https://security.authzed.com + + + +[GitHub]: https://github.com/authzed/spicedb/security/advisories +[NVD]: https://nvd.nist.gov/vuln/search/results?query=spicedb +[MITRE CVE List]: https://cve.mitre.org/cve/ + +## What is a security embargo program? + +A security embargo program is a defined process under which security issues are privately reported, analyzed for applicability, notice is given, and a resolution is created and distributed. +The issue is only made public once those affected in the embargo program have enough time to address the issue or have accepted the risks. + +Security embargos are an industry best practice for ensuring that there are not critical software deployments with well documented exploitation instructions. + +## Reporting a security issue + +An email to security [AT] authzed.com should be used to notify the security team of any issues. +Be a good witness. +Behave as if you were reporting a crime and include specific details about what you have discovered. + +### Low and Medium Severity + +Issues meeting this severity are simply suspicions or odd behaviors. +They are not verified and require further investigation. +There is no clear indicator that systems have tangible risk and do not require emergency response. +This includes suspicious emails, outages, strange activity on a laptop that can be tracked back to our software. + +### High Severity + +High severity issues relate to problems where an adversary or active exploitation hasn’t been proven yet, and may not have happened, but likely to happen. +This may include vulnerabilities with direct risk of exploitation, threats with risk or adversarial persistence on our systems (e.g. backdoors, malware), malicious access of business data (e.g. passwords, vulnerability data, payments information), or threats that put any individual at risk of physical harm. + +Emails reporting high severity issues should include "Urgent" in the subject line. + +### Critical Severity + +Critical issues relate to actively exploited risks and involve a malicious actor. +Identification of active exploitation is critical to this severity category. + +Emails reporting critical severity issues should include "Critical" in the subject line. +Continue escalation until you receive acknowledgement. + +## How long until a vulnerability goes public? + +When vulnerabilities are reported, AuthZed works with the reporter to develop a resolution timeline. +Some vulnerabilities are reported by research firms that have strict policies to encourage a quick response and other reports are from individuals that may be more flexible with resolution time. + +Once a report has been verified and a timeline has been established, AuthZed customers are informed. +In the absence of a strict deadline, vulnerabilities are made public once every possibly affected AuthZed customer has either resolved the issue or accepted the risk to continuing to operate. + +## What actions must users under embargo take? + +Dedicated Deployments running the latest release in any [Update Channel] are automatically upgraded to include embargoed security patches. + +Deployments with versions pinned to an older release and self-hosted users under embargo are given notice, but are ultimately responsible for updating their own software. + +[Update Channel]: ./update-channels diff --git a/content/authzed/concepts/update-channels.mdx b/content/authzed/concepts/update-channels.mdx new file mode 100644 index 0000000..c7ca28d --- /dev/null +++ b/content/authzed/concepts/update-channels.mdx @@ -0,0 +1,34 @@ +# Update Channels + +Update Channels dictate the flow of updates to running SpiceDB clusters. + +All channels offer a trade-off between feature availability and update churn. +While each channel has different qualification standards, all channels offer fully tested GA releases of SpiceDB. + +While open source users of the [SpiceDB Operator] have a single update channel of open source releases, AuthZed maintains two configurable channels: + +- **Rapid**: every enterprise release of SpiceDB +- **Regular**: releases qualified by time spent deployed to Rapid, usually trailing by 1-2 releases + +[SpiceDB Operator]: ../../spicedb/ops/operator + +## Configuring Updates + +Once a channel is selected, Update Channels can be configured on two more dimensions: update strategy and rollout strategy. + +### Update Strategy + +By default, channels are configured for automatic updates. + +By unchecking a box, updates can be applied manually by selecting the next release whenever one so chooses. + +Regardless of choice, updates can only occur between compatible versions of SpiceDB as dictated by the Update Channel. + +### Rollout Strategy + +There are two different strategies for upgrading to a new version of SpiceDB: + +- **Rolling Update**: zero-downtime, gradual deployment of new SpiceDB instances +- **Immediate**: involves downtime, but is the fastest way to upgrade + +By default, a SpiceDB upgrade is applied using a Rolling Update to avoid downtime. diff --git a/content/authzed/concepts/workload-isolation.mdx b/content/authzed/concepts/workload-isolation.mdx new file mode 100644 index 0000000..e5f42b0 --- /dev/null +++ b/content/authzed/concepts/workload-isolation.mdx @@ -0,0 +1,31 @@ +import YouTube from "react-youtube"; + +# Workload Isolation + +Workload Isolation is functionality exclusive to AuthZed products that guarantees exclusive access to hardware to guarantee performance. + +## Control Plane + +Services that are secondary to serving real-time requests run one shared workload called the control plane. + +These services include: + +- AuthZed management UI and its accompanying machinery +- Observability services for the SRE team (e.g. Prometheus, Grafana, Jaeger) +- Compliance infrastructure + +## Workloads + +## Example Architecture Diagram + +![Workload Isolation Diagram](/images/workload-isolation-diagram.png) + +## Low-latency Workloads on Kubernetes + +AuthZed has presented a video to the [CNCF] documenting some of the critical components to running low-latency workloads on Kubernetes. +The technologies presented partially describe the implementation of Workload Isolation in AuthZed products: + +[CNCF]: https://cncf.io + +
+ diff --git a/content/authzed/guides/picking-a-product.mdx b/content/authzed/guides/picking-a-product.mdx new file mode 100644 index 0000000..701590f --- /dev/null +++ b/content/authzed/guides/picking-a-product.mdx @@ -0,0 +1,126 @@ +import { Callout } from "nextra/components"; + +# Picking the right AuthZed Product + +For those that want to consume SpiceDB without the overhead of operating the service, AuthZed offers managed SpiceDB services and support. + +When evaluating AuthZed's products, there are a few different requirements that can dramatically influence your decision. +This document is designed to give a high-level overview so that you can effectively evaluate those relevant to your specific use case. + +## Feature Matrix + +The following table maps functional requirements to their respective products: + +| Requirements | [Open Source] | [Cloud] | [Dedicated] | [Enterprise] | +| ---------------------------- | :-----------: | :-----: | :---------: | :----------: | +| [Materialize (Early Access)] | ❌ | ✅ | ✅ | ❌ | +| [Management Dashboard] | ❌ | ✅ | ✅ | ❌ | +| No-commit pricing | ✅ | ✅ | ❌ | ❌ | +| [Private Networking] | DIY | ❌ | ✅ | DIY | +| [Workload Isolation] | DIY | ✅ | ✅ | DIY | +| Self-Hosted | ✅ | ❌ | ❌ | ✅ | +| [Automated Updates] | DIY | ✅ | ✅ | DIY | +| [SOC2 Compliance] | DIY | ✅ | ✅ | DIY | +| [Audit Logging] | ❌ | ✅ | ✅ | ✅ | +| [Multi-Region] | DIY | ❌ | ✅ | DIY | +| [Security Embargo] | ❌ | ✅ | ✅ | ✅ | + +[Cloud]: #cloud +[Dedicated]: #dedicated +[Enterprise]: #enterprise +[Open Source]: #open-source +[Materialize (Early Access)]: ../concepts/authzed-materialize +[Audit Logging]: ../concepts/audit-logging +[Automated Updates]: ../concepts/update-channels +[Management Dashboard]: ../concepts/management-dashboard +[Multi-Region]: ../concepts/multi-region +[SOC2 Compliance]: https://security.authzed.com +[Security Embargo]: ../concepts/security-embargo + +## Product Overviews + +### Cloud + + + **Info:** + AuthZed Cloud is now available to everyone! + +[Sign up](https://authzed.com/cloud/signup) + + + +AuthZed Cloud is our self-service platform that allows you to provision, manage, and scale your authorization infrastructure on demand. + +Functionality includes: + +- Creating organizations, teams, and delegating access with your SSO provider +- Commitment-free, usage-based monthly pricing +- Workload Isolation vastly improving performance & latency +- Deployments in more regions, including Europe +- Full SpiceDB API access: Watch API, HTTP API +- Enterprise features like Audit Logs, Restricted API Access + +AuthZed Cloud is built on the same foundation that's serving critical production workloads in AuthZed Dedicated. + +### Dedicated + +Dedicated is a managed service that offers fully private deployments of our cloud platform in your cloud provider and regions of choice. + +Dedicated features highlights include: + +- Our most popular product +- [Materialize (Early Access)] +- [Workload Isolation] +- [Private Networking] +- [Multi-Region Deployments] +- [SOC2 compliance] +- [Security Embargo] +- [Restricted API Access] +- [Audit Logging] +- [Update Channels] +- [Expedited Support] + +Dedicated is sold and renewed on an annual basis. + +You can [schedule a call] to learn more. + +[Materialize (Early Access)]: ../concepts/authzed-materialize +[Multi-Region Deployments]: ../concepts/multi-region +[Private Networking]: ../concepts/private-networking +[Workload Isolation]: ../concepts/workload-isolation +[SOC2 compliance]: https://security.authzed.com +[Update Channels]: ../concepts/update-channels +[Security Embargo]: ../concepts/security-embargo +[Restricted API Access]: ../concepts/restricted-api-access +[Expedited Support]: ../concepts/expedited-support +[schedule a call]: https://authzed.com/call?utm_source=docs + +### Enterprise + +AuthZed Enterprise provides access to a licensed version of enterprise releases of SpiceDB for running in self-hosted/on-premise environments. + +You can [schedule a call] to learn more. + +[schedule a call]: https://authzed.com/call?utm_source=docs + +### Open Source + +We understand that AuthZed's product suite will not satisfy all use cases and, for that reason, we've built and continue to invest in an open-source community around SpiceDB. + +Community support is available to absolutely everyone and does not require any form of payment or usage of AuthZed products. + +[Discord] is the first line of support for asking questions or getting help with SpiceDB in real time. +[GitHub Discussions] is also available for asynchronous questions. +For bug reports, feature requests, and development discussion, [GitHub Issues] are available to all for participation. + + + The community encompasses not only folks paid to contribute, but maintainers and end-users volunteering their time to answer your questions. + +Please be respectful and read over the community [Code of Conduct] before interacting. + + + +[discord]: https://authzed.com/discord +[github discussions]: https://github.com/orgs/authzed/discussions/categories/q-a +[github issues]: https://github.com/authzed/spicedb/issues +[Code of Conduct]: https://github.com/authzed/spicedb/blob/main/CODE-OF-CONDUCT.md diff --git a/content/authzed/guides/setting-up-private-networking.mdx b/content/authzed/guides/setting-up-private-networking.mdx new file mode 100644 index 0000000..e428ade --- /dev/null +++ b/content/authzed/guides/setting-up-private-networking.mdx @@ -0,0 +1,152 @@ +import { Steps } from "nextra/components"; +import { Callout } from "nextra/components"; + +# Setting up Private Networking + +This guide walks through setting up AuthZed Dedicated [Private Networking]. + +[Private Networking]: ../concepts/private-networking + + + Private networking is recommended, but optional. Authzed Dedicated can be + configured to allow for connecting from the public internet. + + +## AWS Steps + + + +### Onboarding + +The customer success team at AuthZed should reach out about Private Networking during onboarding. + +In the case where there's any miscommunication, please reach out via Slack or [schedule a call][call]. + +[call]: https://authzed.com/call + +### Creating a VPC Endpoint + +[AWS PrivateLink] is the core technology involved in this process. + +[AWS PrivateLink]: https://docs.aws.amazon.com/whitepapers/latest/aws-privatelink/aws-privatelink.html + +Log into the AWS web console for the account you plan to peer. + +Navigate to `VPC` → `Endpoints` → `Create Endpoint` and input the following info: + +| Option | Selection | +| :--------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Name tag | Choose whatever you want | +| Service category | Select “Endpoint services that use NLBs and GWLBs” | +| Service name | Enter the "service name" provided to you by the AuthZed team | +| VPC | Choose the VPC from where you will deploy your SpiceDB client. DNS resolution for your SpiceDB cluster endpoint address will only be available from this VPC. | +| Subnets | You can deploy your VPC endpoint in one subnet per AZ. We recommend choosing all AZs where SpiceDB clients will exist. | +| IP address type | IPV4 | +| Security Group | Choose a security group that allows inbound port 443 traffic from your clients | + +When you're ready to create the new resource, click `Create endpoint` + +### Enabling DNS for the Endpoint + +By default, VPC Endpoints do not have access to public DNS. +This is a reasonable default for internal networking, but AuthZed publishes public DNS. +In order to make connections from your systems resilient to changing IP addresses, we will enable it. + +Navigate to the Endpoint that was just created, select `Actions` from the drop-down menu, and select `Modify private DNS name` from the next drop-down menu. + +Check `Enable for this endpoint` and then click `Save changes` to persist this setting. + +The diagram below explains how DNS resolution works with PrivateLink and AuthZed Dedicated. + +![privatelink-dns](/images/privatelink-dns.png) + +### Create a Permissions System + +You can skip this section if you've already created a Permissions System. + +1. Login to your SpiceDB management console +2. On the homepage, select `Add Permissions System` +3. Configure your permission system to your liking and create it + +### Verify Connectivity + +Ensure the next steps are executed on a machine that is in your internal network. +This might be your local machine, if your corporate VPN is peered with your cloud networks otherwise you'll probably have to create a container or EC2 instance in the network. + +There are a variety of ways to interact with the SpiceDB API, but for this guide we'll be using the official command-line client, [Zed]. + +If you don't already have zed installed, you can follow [this guide][install-zed]. + +For zed to connect to SpiceDB, we'll first have to create an context named `network-test` for connecting to our locally forwarded port: + +```sh +zed context set network-test example.us-east-1.aws.authzed.net:443 sdbst_h256_cef4fc239bf... +``` + +With our context set, we're free to make requests to our Permission System: + +```sh +zed schema read +``` + +If all is successful, you should see your schema or an empty Permission System should have thrown an error: + +``` +code = NotFound +desc = No schema has been defined; please call WriteSchema to start +``` + +[zed]: https://github.com/authzed/zed +[install-zed]: /spicedb/getting-started/installing-zed + + + +## GCP Steps + +Coming Soon + +## Azure Steps + + + +### Onboarding + +The customer success team at AuthZed should reach out about Private Networking during onboarding. + +In the case where there's any miscommunication, please reach out via Slack or [schedule a call](https://authzed.com/call). + +### Create an Azure Private Endpoint + +[Azure PrivateLink](https://azure.microsoft.com/en-us/products/private-link) is the core technology involved in this process. + +Login to the [Azure Portal](https://portal.azure.com/) and go to the private endpoint page. +Create a new private endpoint and select the following options: + +| Option | Selection | +| :--------------------: | :------------------------------------------------------------------------------------: | +| Name | Choose a descriptive name for your private endpoint. | +| Connection method | Connect to an Azure resource by resource ID or alias. | +| Alias | Enter the alias provided to you by the AuthZed team. | +| Subnet | Choose the subnet where your SpiceDB clients will be deployed. | +| Network Security Group | If you use a NSG, configure it to allow inbound traffic on port 443 from your clients. | + +### Configure DNS + +To access the private endpoint, you need to configure DNS resolution. +This can be done by creating a private DNS zone in Azure. + +Go to the private DNS zones page in the [Azure Portal](https://portal.azure.com/) and create a new private DNS zone. +Use the following settings: + +- Set the name of the private DNS zone using the DNS name of your SpiceDB cluster which is available in the AuthZed Dedicated management dashboard. +- Create an A record for the root (@) in the private DNS zone that points to the private IP address of your private endpoint which was created earlier. + +Now we need to link the private DNS zone to the virtual network where the private endpoint is located: + +- Click on Virtual Network Links and then click **Add**. +- Name the link and select the virtual network where your private endpoint is located. +- Enable the option **auto registration** to automatically register DNS records for resources in the virtual network. + +For more details on configuring Private Endpoints and DNS in Azure, refer to the [Azure documentation](https://learn.microsoft.com/en-us/azure/dns/private-dns-getstarted-portal). + + diff --git a/content/authzed/links/_meta.ts b/content/authzed/links/_meta.ts new file mode 100644 index 0000000..d8d6024 --- /dev/null +++ b/content/authzed/links/_meta.ts @@ -0,0 +1,20 @@ +import type { MetaRecord } from 'nextra' + +export default { + schedule: { + title: "Schedule a Call", + href: "https://authzed.com/call?utm_source=docs", + }, + changelog: { + title: "Changelog", + href: "https://changelog.authzed.cloud?utm_source=docs", + }, + "security-policies": { + title: "Security Policies", + href: "https://security.authzed.com?utm_source=docs", + }, + status: { + title: "Service Status Page", + href: "https://status.authzed.com?utm_source=docs", + }, +} satisfies MetaRecord; diff --git a/content/best-practices/_meta.ts b/content/best-practices/_meta.ts new file mode 100644 index 0000000..1866254 --- /dev/null +++ b/content/best-practices/_meta.ts @@ -0,0 +1,25 @@ +import type { MetaRecord } from 'nextra' + +export default { + index: { + title: "Best Practices", + theme: { + breadcrumb: false, + sidebar: true, + toc: true, + pagination: false, + }, + }, + priority: { + title: "Essential", + href: "#priority-a-essential", + }, + "strongly-recommended": { + title: "Strongly Recommended", + href: "#priority-b-strongly-recommended", + }, + recommended: { + title: "Recommended", + href: "#priority-c-recommended", + }, +} satisfies MetaRecord; diff --git a/content/best-practices/index.mdx b/content/best-practices/index.mdx new file mode 100644 index 0000000..14b6c4d --- /dev/null +++ b/content/best-practices/index.mdx @@ -0,0 +1,353 @@ +# Best Practices + +## Rule Categories + +### Priority A: Essential + +The essential rules are the most important ones. +Use them to ensure that your SpiceDB cluster is performant, your schema is sane, and your authorization logic is sound. +Exceptions to these rules should be rare and well justified. + +### Priority B: Strongly Recommended + +The strong recommendation rules will improve the schema design, developer experience, and performance of your SpiceDB cluster. +In most cases, these rules should be followed. + +### Priority C: Recommended + +The recommended rules reflect how we would run our own systems, but may not apply to every use case and may not make sense in every situation. +Follow them if you can and ignore them if you can’t. + +## Priority A Rules: Essential + +### Make Sure your Schema Fails Closed + +Tags: **schema** + +This is related to the idea of using negation sparingly, and of phrasing your schema additively. +Give thought to what happens if your application fails to write a relation: should the user have access in that case? +The answer is almost always `no`. + +This example is very simple, but illustrates the basic point: + +#### Avoid + +This schema starts with everyone having access and reduces it as you add users to the deny list. +If you fail to write a user to the deny list, they'll have access when they shouldn't: + +```zed +definition user {} + +definition resource { + relation public: user:* + relation deny: user + + permission view = public - deny +} +``` + +#### Prefer + +By contrast, this schema defaults to nobody having access, and therefore fails closed: + +```zed +definition user {} + +definition resource { + relation user: user + + permission view = user +} +``` + +This is an admittedly simple example, but the concept holds in more complex schemas. +This will also sometimes require a conversation about the business logic of your application. + +### Tune Connections to Datastores + +Tags: **operations** + +To size your SpiceDB connection pools, start by determining the maximum number of allowed connections based on the documentation for your selected datastore, divide that number by the number of SpiceDB pods you’ve deployed, then split it between read and write pools. + +Use these values to set the `--datastore-conn-pool-read-max-open` and `--datastore-conn-pool-write-max-open` flags, and set the corresponding min values to half of each, adjusting as needed based on whether your workload leans more heavily on reads or writes. + +#### Example + +Let's say you have a database instance that supports 200 connections, and you know that you read more than you write. +You have 4 SpiceDB instances in your cluster. +A starting point for tuning this might be: + +```sh +spicedb serve +# other flags here +--datastore-conn-pool-read-max-open 30 +--datastore-conn-pool-read-min-open 15 +--datastore-conn-pool-write-max-open 20 +--datastore-conn-pool-write-min-open 10 +``` + +This reserves 50 connections per SpiceDB instance and distributes them accordingly. + +The `pgxpool_empty_acquire` metric can help you understand if your SpiceDB pods are starved for connections if you're using Postgres or Cockroach. + +### Test Your Schema + +Tags: **schema** + +You should be testing the logic of your schema to ensure that it behaves the way you expect. + +- For unit testing and TDD, use test relations + assertions and [zed validate](https://authzed.com/docs/spicedb/modeling/validation-testing-debugging#zed-validate). +- For snapshot testing, use test relations + expected relations and [zed validate](https://authzed.com/docs/spicedb/modeling/validation-testing-debugging#zed-validate). +- For integration testing, use the SpiceDB test server with SpiceDB [serve-testing](https://authzed.com/docs/spicedb/modeling/validation-testing-debugging#integration-test-server). + +### Prefer Relations to Caveats + +Tags: **schema** + +If an authorization concept can be expressed using relations, it should be. +We provide caveats as an escape hatch; they should only be used for context that’s only available at request time, or else ABAC logic that cannot be expressed in terms of relationships. + +This is because caveats come with a performance penalty. +A caveated relationship is both harder to cache and also slows down computation of the graph walk required to compute a permission. + +Some examples: + +- A banlist - this could be expressed as a list in caveat context, but it can also be expressed as a relation with negation. +- A notion of public vs internal - boolean flags seem like an obvious caveat use case, but they can also be expressed using self relations. +- Dynamic roles - these could be expressed as a list in caveats, and it’s not immediately obvious how to build them into a SpiceDB schema, but our [Google Cloud IAM example](https://authzed.com/blog/google-cloud-iam-modeling) shows how it’s possible. + +### Make Your Writes Idempotent + +Tags: **application** + +Relations in SpiceDB are binary (a relation is present or it's not), and `WriteRelationships` calls are atomic. +As much as possible, we recommend that you use the [`TOUCH`](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.RelationshipUpdate) semantic for your write calls, because it means that you can easily retry writes and recover from failures. + +If you’re concerned about sequencing your writes, or your writes have dependencies, we recommend using [preconditions](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.Precondition). + +### Don’t truncate your tables when running Postgres + +Tags: **operations** + +If you truncate your Postgres table, your SpiceDB pods will become unresponsive until you run SpiceDB datastore repair. +We recommend either dropping the tables entirely and recreating them with `spicedb datastore migrate head` or deleting the data using a `DeleteRelationships` call instead. + +To ensure that every request, whether cached or not, gets a consistent point-in-time view of the underlying data, SpiceDB uses Multi-Version Concurrency Control. +Some datastores provide this natively; in others we’ve implemented it on top of the datastore. +In Postgres, the implementation of MVCC depends on the internals of the transaction counter being stored as data in the tables, so if you truncate the relationships table you desync the transaction counter with the stored relationships. + +## Priority B Rules: Strongly Recommended + +### Understand your consistency needs + +Tags: **operations** + +SpiceDB gives the user the ability to make tradeoffs between cache performance and up-to-date visibility using [its consistency options](https://authzed.com/docs/spicedb/concepts/consistency). +In addition to these call-time options, there are also some flags that can provide better cache performance if additional staleness is acceptable. +For example, by default, SpiceDB sets the Quantization Interval to 5s; check operations are cached within this window when using `minimize_latency` or `at_least_as_fresh` calls. +Setting this window to be larger increases the ability of SpiceDB to use cached results with a tradeoff of results staying in the cache longer. +More details about how these flags work together can be found in our [Hotspot Caching blog post](https://authzed.com/blog/hotspot-caching-in-google-zanzibar-and-spicedb). +To change this value, set the `--datastore-revision-quantization-interval` flag. + +When it comes to write consistency, SpiceDB defaults to high safety, +especially in distributed database writing scenarios, guaranteeing a visibility order. +Individual datastores may also allow a relaxation of this guarantee, based on your scenario; +for example, [setting CockroachDB’s overlap strategy](https://authzed.com/docs/spicedb/concepts/datastores#overlap-strategy), +can let you trade some ordering and consistency guarantees across domains for greatly increased write throughput. + +### Use GRPC When Possible + +Tags: **application** + +SpiceDB can be configured to expose both an [HTTP API](https://authzed.com/docs/spicedb/getting-started/client-libraries#http-clients) and associated Swagger documentation. +While this can be helpful for initial exploration, we strongly recommend using one of our gRPC-based official client libraries if your networking and calling language support it. +gRPC is significantly more performant and lower-latency than HTTP, and client-streaming services like ImportBulk can’t be used with the HTTP API. + +### Keep Permission Logic in SpiceDB + +Tags: **schema** + +One of the big benefits to using a centralized authorization system like SpiceDB is that there's one place to look for your authorization logic, and authorization logic isn't duplicated across services. +It can be tempting to define the authorization logic for an endpoint as being the `AND` or `OR` of the checks of other permissions, especially when the alternative is writing a new schema. +However, this increases the likelihood of drift across your system, hides the authorization logic for a system in that system's codebase, and increases the load on SpiceDB. + +### Avoid Cycles in your Schema + +Tags: **schema** + +Recursive schemas can be very powerful, but can also lead to large performance issues when used incorrectly. +A good rule of thumb is, if you need a schema definition to recur, have it refer to itself (e.g., groups can have subgroups). +Avoid situations where a definition points to a separate definition that, further down the permission chain, points to the original definition by accident. + +Avoid: + +```zed +definition user { + relation org: organization +} + +definition group { + relation member: user +} + +definition organization { + relation subgroup: group +} +``` + +Preferred: + +```zed +definition user {} + +definition group { + relation member: user | group +} +``` + +### Phrase Permissions Additively/Positively + +Tags: **schema** + +A more comprehensible permission system is a more secure permission system. +One of the easiest ways to maintain your authorization logic is to treat permissions as `positive` or `additive`: a user gains permissions when relations are written. +This reduces the number of ways that permission logic can interact, and prevents the granting of permission accidentally. + +In concrete terms, that means use wildcards and negations sparingly. +Start with no access and build up; don’t start with full access and pare down. + +### Use Unique Identifiers for Object Identifiers + +Tags: **application** + +Because you typically want to centralize your permissions in SpiceDB, that also means that most of the `IDs` of objects in SpiceDB are references to external entities. +These external entities shouldn't overlap. +To that end, we recommend either using `UUIDs` or using another identifier from the upstream that you can be sure will be unique, such as the unique sub field assigned to a user token by your IDP. + +### Avoid ReadRelationships API + +Tags: **application** + +The `ReadRelationships` API should be treated as an escape hatch, used mostly for data introspection. +Using it for permission logic is a code smell. +All checks and listing of IDs should use `Check`, `CheckBulk`, `LookupResources`, and `LookupSubjects`. +If you find yourself reaching for the `ReadRelationships` API for permission logic, there's probably a way to modify your schema to use one of the check APIs instead. + +### Prefer CheckBulk To LookupResources + +Tags: **application** + +Both `CheckBulk` and `LookupResources` can be used to determine whether a subject has access to a list of objects. +Where possible, we recommend `CheckBulk`, because its work is bounded to the list of requested checks, whereas the wrong `LookupResources` call can return the entire world and therefore be slow. + +LookupResources generally requires a lot of work, causes a higher load, and subsequently has some of the highest latencies. +If you need its semantics but its performance is insufficient, we recommend checking out our [Materialize](https://authzed.com/products/authzed-materialize) offering. + +## Priority C Rules: Recommended + +### Treat Writing Schema like Writing DB Migrations + +Tags: **operations** + +We recommend treating an update to your SpiceDB schema as though it were a database migration. +Keep it in your codebase, test it before deployment, and write it to your SpiceDB cluster as a part of your continuous integration process. +This ensures that updates to your schema are properly controlled. + +### Load Test + +Tags: **operations** + +To evaluate the performance and capabilities of your SpiceDB cluster and its underlying datastore, AuthZed provides [Thumper](https://github.com/authzed/thumper) — a load testing tool. +You can use Thumper to simulate workloads and validate schema updates before deploying them to a production environment. + +### Use ZedTokens and “At Least As Fresh” for Best Caching + +Tags: **application** + +SpiceDB’s fully consistent mode (`fully_consistent`) forces the use of the most recent datastore revision, which might not be the most optimal, and reduces cache hit rate, increasing latency and load on the datastore. + +If possible, we recommend using `at_least_as_fresh` with `ZedTokens` instead. +Capture the `ZedToken` returned by your initial request, then include it in all subsequent calls. +SpiceDB will guarantee you see a state at least as fresh as that token while still leveraging in-memory and datastore caches to deliver low-latency responses + +### Prefer Checking Permissions Instead of Relationships + +Tags: **application** + +It's possible to make a `Check` call with a relation as the permission. +Even in a simple schema, we recommend instead that you have a permission that points at the relation and to check the relation. +This is because if the logic of the check needs to change, it's easy to change the definition of a permission and difficult to change the definition of a relation (it often requires a data migration). + +### Enable schema watch cache + +Tags: **operations** + +In order to minimize load on the database, you can enable schema watch cache using the flag `--enable-experimental-watchable-schema-cache`. +The schema watch cache is a mechanism that improves performance and responsiveness by caching the currently loaded schema and watching for changes in real time. + +While we recommend enabling this, it isn't enabled by default because it requires additional configuration and knowledge of your datastore. +For Postgres, [`track_commit_timestamp`](https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-TRACK-COMMIT-TIMESTAMP) must be set to `on` for the Watch API to be enabled. +For Spanner, there are a maximum of 5 changefeeds available globally for a table, and this consumes one of them. + +### Use the Operator + +Tags: **operations** + +To ensure seamless rollouts, upgrades, and schema migrations, it is recommended to use the SpiceDB Kubernetes Operator if you’re using Kubernetes. +The Operator automates many operational tasks and helps maintain consistency across environments. +You can find the official documentation for the SpiceDB operator [here](https://authzed.com/docs/spicedb/ops/operator). + +### Ensure that SpiceDB Can Talk To Itself + +Tags: **operations** + +In SpiceDB, dispatching subproblems refers to the internal process of breaking down a permission check or relationship evaluation into smaller logical components. +These subproblems are dispatched horizontally between SpiceDB nodes, which shares the workload and increases cache hit rate - this is [SpiceDB’s horizontal scalability](https://authzed.com/blog/consistent-hash-load-balancing-grpc). +For this to work, the SpiceDB nodes must be configured to be aware of each other. + +In our experience, running SpiceDB on Kubernetes with our [Operator](https://authzed.com/docs/spicedb/ops/operator) is the easiest and best way to achieve this. +It’s possible to configure dispatch using DNS as well, but non-Kubernetes based dispatching relies upon DNS updates, which means it can become stale if DNS is changing. +This is not recommended unless DNS updates are rare. + +### Choose the Right Load Balancer + +Tags: **operations** + +In our experience, TCP-level L4 load balancers play more nicely with gRPC clients than HTTP-level L7 load balancers. +For example, we’ve found that even though AWS Application Load Balancers purport to support gRPC, they have a tendency to drop connections and otherwise misbehave; AWS Network Load Balancers seem to work better. + +### Use the Provided Metrics, Traces, and Profiles + +Tags: **operations** + +To gain deeper insights into the performance of your SpiceDB cluster, the pods expose both Prometheus metrics and `pprof` profiling endpoints. +You can also configure tracing to export data to compatible OpenTelemetry backends. + +- Refer to the [SpiceDB Prometheus documentation](https://authzed.com/docs/spicedb/ops/observability#prometheus) for details on collecting metrics. + - AuthZed Cloud supports exporting metrics to Datadog via the official [AuthZed cloud datadog integration](https://docs.datadoghq.com/integrations/authzed_cloud/). + - To gain a complete picture of your SpiceDB cluster’s performance, it’s important to export metrics from the underlying datastore. + These metrics help identify potential bottlenecks and performance issues. + AuthZed Cloud provides access to both CockroachDB and PostgreSQL metrics via its cloud telemetry endpoints, enabling deeper visibility into database behavior. +- The [profiling documentation](https://authzed.com/docs/spicedb/ops/observability#profiling) explains how to use the pprof endpoints. +- The [tracing documentation](https://authzed.com/docs/spicedb/ops/observability#opentelemetry-tracing) walks you through sending trace data to a Jaeger endpoint. + +### Use Partials + Composable Schema to Organize your Schema + +Tags: **schema** + +As a schema grows in size and complexity, it can become difficult to navigate and grok. +We implemented [Composable Schemas](https://authzed.com/docs/spicedb/modeling/composable-schemas) to solve this problem, allowing you to break down a schema into multiple files and definitions into multiple problems. + +### Don't Re-Use Permissions Across Use Cases + +Tags: **schema** + +When adding a new feature or service, it can be tempting to re-use existing permissions that currently match the semantics you’re looking for, rather than doing the work of modifying the schema to introduce a new permission. +However, if the authorization business logic changes between use cases, you’ll not only have to do the work of modifying the permission, but also modifying the call site, so we recommend frontloading that work. + +### Use Expiration Feature for Expiration Logic + +Tags: **schema** + +Expiration is a common use case – at some future time, a permission is revoked. +It’s so common, it’s now [a built-in feature](https://authzed.com/docs/spicedb/concepts/expiring-relationships), and is far more efficient for SpiceDB to handle than doing the same with caveats! diff --git a/content/globals.css b/content/globals.css new file mode 100644 index 0000000..64c3b36 --- /dev/null +++ b/content/globals.css @@ -0,0 +1,95 @@ +@import 'tailwindcss'; + +@custom-variant dark (&:is(html[class~="dark"] *)); + +@theme { + --breakpoint-*: initial; + --breakpoint-sm: 640px; + --breakpoint-md: 768px; + --breakpoint-lg: 1024px; + --breakpoint-xl: 1280px; + --breakpoint-2xl: 1536px; + + --text-*: initial; + --text-xs: 0.75rem; + --text-sm: 0.875rem; + --text-base: 1rem; + --text-lg: 1.125rem; + --text-xl: 1.25rem; + --text-2xl: 1.5rem; + --text-3xl: 1.875rem; + --text-4xl: 2.25rem; + --text-5xl: 3rem; + --text-6xl: 4rem; + + --tracking-*: initial; + --tracking-tight: -0.015em; + + --color-dark: #111; + --color-transparent: transparent; + --color-current: currentColor; + --color-black: #000; + --color-white: #fff; + + --color-primary-50: #fafafa; + --color-primary-100: #f5f5f5; + --color-primary-200: #e5e5e5; + --color-primary-300: #d4d4d4; + --color-primary-400: #a3a3a3; + --color-primary-500: #737373; + --color-primary-600: #525252; + --color-primary-700: #404040; + --color-primary-800: #262626; + --color-primary-900: #171717; + --color-primary-950: #0a0a0a; +} + +/* + The default border color has changed to `currentcolor` in Tailwind CSS v4, + so we've added these compatibility styles to make sure everything still + looks the same as it did with Tailwind CSS v3. + + If we ever want to remove these styles, we need to add an explicit border + color utility to any element that depends on these defaults. +*/ +@layer base { + *, + ::after, + ::before, + ::backdrop, + ::file-selector-button { + border-color: var(--color-gray-200, currentcolor); + } +} + +body { + font-feature-settings: + "rlig" 1, + "calt" 1; +} + +/* https://github.com/tjallingt/react-youtube/issues/242 */ +.youtubeContainer { + position: relative; + width: 100%; + height: 0; + padding-bottom: 56.25%; + overflow: hidden; + margin-bottom: 50px; +} + +.youtubeContainer iframe { + width: 100%; + height: 100%; + position: absolute; + top: 0; + left: 0; +} + +.swagger-ui .information-container { + display: none; +} + +.swagger-ui .scheme-container { + display: none; +} diff --git a/content/index.mdx b/content/index.mdx new file mode 100644 index 0000000..649d7e8 --- /dev/null +++ b/content/index.mdx @@ -0,0 +1,13 @@ +Browse documentation for **SpiceDB** or **AuthZed Products** by selecting one below. + +import { Cards } from "nextra/components"; + + + + + diff --git a/content/mcp/_meta.ts b/content/mcp/_meta.ts new file mode 100644 index 0000000..09f2aa3 --- /dev/null +++ b/content/mcp/_meta.ts @@ -0,0 +1,18 @@ +import type { MetaRecord } from 'nextra' + +export default { + index: { + title: "Model Context Protocol", + theme: { + breadcrumb: true, + sidebar: true, + toc: true, + pagination: false, + }, + }, + authzed: "AuthZed MCP", + "mcp-reference": { + title: "MCP Reference Implementations", + href: "https://github.com/authzed/mcp-server-reference", + }, +} satisfies MetaRecord; diff --git a/content/mcp/authzed/_meta.ts b/content/mcp/authzed/_meta.ts new file mode 100644 index 0000000..4d30388 --- /dev/null +++ b/content/mcp/authzed/_meta.ts @@ -0,0 +1,10 @@ +import type { MetaRecord } from 'nextra' + +export default { + "authzed-mcp-server": { + title: "AuthZed MCP Server", + }, + "spicedb-dev-mcp-server": { + title: "SpiceDB Dev MCP Server", + }, +} satisfies MetaRecord; diff --git a/content/mcp/authzed/authzed-mcp-server.mdx b/content/mcp/authzed/authzed-mcp-server.mdx new file mode 100644 index 0000000..b404504 --- /dev/null +++ b/content/mcp/authzed/authzed-mcp-server.mdx @@ -0,0 +1,216 @@ +# AuthZed MCP Server + +Connect your AI tools to AuthZed and SpiceDB documentation using the Model Context Protocol (MCP). +Access comprehensive documentation, API references, and authorization pattern examples directly from your AI assistant. + +## Overview + +AuthZed MCP Server is a remote MCP server available at `https://mcp.authzed.com`. +It provides tools with searchable access to SpiceDB and AuthZed resources, enabling you to learn about authorization systems, explore APIs, and find implementation examples without leaving your LLM chat or development environment. + +### What You Can Do + +- **Search Documentation**: Find relevant information across all SpiceDB and AuthZed documentation pages +- **Discover APIs**: Explore all API methods and message types with detailed specifications +- **Find Examples**: Browse authorization pattern examples including RBAC, document sharing, and more with SpiceDB schemas included +- **Learn Concepts**: Access comprehensive guides on schema design, relationships, and permissions + +## Supported Clients + +Works with any MCP-compatible AI client including: + +- ChatGPT +- Claude Code and Claude Desktop +- Cursor +- VS Code with Copilot +- Windsurf +- Zed Editor +- Other MCP-compatible tools + +## Setup + +### ChatGPT + +Available on Pro and Plus accounts. + +1. Enable **Developer mode** in Settings +2. Create connector: + +- **Name**: AuthZed +- **MCP server URL**: `https://mcp.authzed.com` +- **Authentication**: None + +### Claude Code + +```bash +claude mcp add --transport http authzed https://mcp.authzed.com + +# Start Claude Code +claude +``` + +### Claude Desktop + +Available on Pro, Max, Team, and Enterprise plans. + +1. Open **Settings** → **Connectors** +2. Select **Add custom connector** +3. Configure: + +- **Name**: AuthZed +- **URL**: `https://mcp.authzed.com` + +### Cursor + +Add to `.cursor/mcp.json`: + +```json +{ + "mcpServers": { + "authzed": { + "url": "https://mcp.authzed.com" + } + } +} +``` + +### VS Code with Copilot + +1. Command Palette → **MCP: Add Server** +2. Select **HTTP** +3. Configure: + +- **URL**: `https://mcp.authzed.com` +- **Name**: AuthZed + +### Windsurf + +Add to `mcp_config.json`: + +```json +{ + "mcpServers": { + "authzed": { + "serverUrl": "https://mcp.authzed.com" + } + } +} +``` + +## Using the Server + +### Learning SpiceDB + +Ask natural language questions about SpiceDB concepts: + +- "How do I define a relationship in SpiceDB?" +- "What are the best practices for schema design?" +- "Explain how Zedtokens work" +- "What is the difference between relations and permissions?" + +### Exploring the API + +Discover API methods and understand their usage: + +- "How do I check permissions using the API?" +- "Show me all read-related API methods" +- "What parameters does WriteRelationships accept?" +- "Explain the CheckPermission response structure" + +### Finding Examples + +Search for authorization patterns and implementation examples: + +- "Show me docs sharing examples" +- "How do I implement role-based access control?" +- "Example schema with caveats" +- "Show me the Google IAM pattern in SpiceDB schema language" + +### Browsing Resources + +List and explore available resources: + +- "What example schemas are available?" +- "List all API methods" +- "Show me documentation about caveats" + +### Getting Help + +Get information about using the server effectively: + +- "How does the AuthZed MCP server work?" +- "What can I do with this server?" + +Use `system_instructions` to understand server capabilities and usage patterns. + +### Providing Feedback + +Share your experience to help improve the server: + +- "I'd like to provide feedback about the documentation" +- "The search results weren't helpful for my query" +- "I have a suggestion for the MCP server" + +The assistant uses `send_feedback` to guide you through submitting structured feedback about the MCP server, documentation quality, tool effectiveness, or your general experience. + +## Available Tools + +**`search_docs`** - Search documentation pages by content, URL, or path + +**`search_api`** - Search API methods and message types with type filtering + +**`search_examples`** - Search authorization pattern examples by title or description + +**`list_resources`** - List all documentation pages, API methods, messages, and examples + +**`send_feedback`** - Send the AuthZed team feedback about the MCP server + +## Prompts + +**`system_instructions`** - View the system instructions for the AuthZed MCP server, including how it works and how to use it effectively. + +**`explain_concept`** - Ask questions about SpiceDB concepts, AuthZed features, schema design, API usage, best practices, or troubleshooting. +Returns authoritative answers with documentation references and examples. + +**`send_feedback`** - Provide feedback about the AuthZed MCP server, documentation, tools, or your general experience. +The prompt guides you through submitting structured feedback. + +## Available Resources + +- **Documentation**: Doc pages are accessible with `docs://` URIs +- **API Methods**: API methods are accessible with `api://methods/` URIs +- **API Messages**: Message types are accessible `api://messages/` URIs +- **Examples**: Schema examples are accessible with `examples://schemas/` URIs + +## Security + +### Public Information Only + +The server provides access to: + +- Public SpiceDB and AuthZed documentation +- Publicly available API specifications +- Open source schema examples + +The server does **not** access: + +- Your AuthZed or SpiceDB instances +- Your authorization data +- Your application schemas +- Any private information + +### Verify the Endpoint + +Always confirm you're connecting to: + +``` +https://mcp.authzed.com +``` + +When using third-party marketplaces, verify the domain name before connecting. + +## Troubleshooting + +**Connection issues**: Verify the URL is `https://mcp.authzed.com` and your client supports remote HTTP MCP servers + +**No search results**: Try broader terms, check spelling, or use `list_resources` to see available content diff --git a/content/mcp/authzed/spicedb-dev-mcp-server.mdx b/content/mcp/authzed/spicedb-dev-mcp-server.mdx new file mode 100644 index 0000000..e209c2c --- /dev/null +++ b/content/mcp/authzed/spicedb-dev-mcp-server.mdx @@ -0,0 +1,326 @@ +import { Callout } from "nextra/components"; + +# SpiceDB Dev MCP Server + + + The SpiceDB Dev MCP server is available as Experimental. + +Experimental functionality enables you to try features and provide feedback during the development process. +However, these features may change or be removed before reaching a stable release. + + + +Run a local SpiceDB development environment directly in your AI coding assistant. +Build, test, and debug permissions systems interactively with an in-memory SpiceDB instance. + +## Overview + +SpiceDB Dev MCP Server is a local development tool that runs an in-memory SpiceDB instance accessible through MCP. +It's designed for developers actively building permissions systems who want to iterate quickly on schemas and test permission logic with AI assistance. + +**Key characteristics**: + +- Runs locally on your machine +- In-memory only (no persistence) +- No external dependencies +- Does not connect to running SpiceDB instances +- Integrated with Zed CLI + +Perfect for: schema development, permission testing, learning SpiceDB through hands-on practice, debugging authorization logic. + +## Prerequisites + +Install the latest version of the zed CLI: + +[Installing zed](/spicedb/getting-started/installing-zed) + +## Starting the Server + +Start the development server: + +```bash +zed mcp experimental-run +``` + +The server starts on `http://localhost:9999/mcp` with an empty in-memory SpiceDB instance. + +**Important**: The server runs in-memory only. +All schemas and relationships are lost when you stop the server. + +## Connecting Clients + +### Claude Code + +To use with Claude Code, run `zed mcp experimental-run` to start the SpiceDB Dev MCP server and then run the following to add the server to your Claude Code integrations: + +```bash +claude mcp add --transport http spicedb http://localhost:9999/mcp +``` + +Then start developing: + +```bash +claude +``` + +### Other Clients + +For MCP clients supporting HTTP transport, configure: + +``` +Transport: HTTP +URL: http://localhost:9999/mcp +``` + +## Development Workflow + +### 1. Write a Schema + +Start by defining your authorization model: + +> You: "Create a schema for a document sharing system. +> Documents have owners, editors, and viewers. +> Owners can share documents, editors can edit, and viewers can only read." + +The assistant uses `write_schema` to create the schema in your development instance. + +### 2. Create Test Data + +Build relationships to test your model: + +> You: "Create test data where alice owns doc:readme, bob is an editor, and charlie is a viewer." + +The assistant uses `update_relationships` to populate your instance with test relationships. + +### 3. Test Permissions + +Validate your authorization logic: + +> You: "Can charlie edit doc:readme?" + +The assistant uses `check_permission` to test the logic and returns `NO_PERMISSION` (charlie is only a viewer). + +> You: "Can bob edit doc:readme?" + +Returns `HAS_PERMISSION` (bob is an editor). + +### 4. Explore the Graph + +Query your authorization relationships: + +> You: "Which documents can alice share?" + +The assistant uses `lookup_resources` to show all documents alice has owner permissions on. + +> You: "Who can view doc:readme?" + +The assistant uses `lookup_subjects` to show alice, bob, and charlie. + +### 5. Iterate + +Refine your schema based on testing: + +> You: "Add a manager role that can edit and also share documents" + +The assistant updates the schema with `write_schema` and you can immediately test the new permissions. + +## Available Tools + +**`write_schema`** - Write or update the SpiceDB schema + +**`update_relationships`** - Add or update relationships in the instance + +**`delete_relationships`** - Remove relationships from the instance + +**`check_permission`** - Check if a subject has a specific permission on a resource + +**`lookup_resources`** - Find all resources a subject can access with a given permission + +**`lookup_subjects`** - Find all subjects that have a given permission on a resource + +## Available Resources + +**`schema://current`** - View the current SpiceDB schema + +**`relationships://all`** - View all relationships in the instance + +**`instructions://`** - Get instructions on using the development server + +**`validation://current`** - Access the current validation file for testing + +## Development Tips + +### Iterative Development + +1. Start with a simple schema +2. Add test relationships +3. Check permissions to validate behavior +4. Refine the schema based on results +5. Repeat + +### Testing Edge Cases + +Use the development server to test: + +- Indirect permissions through [subject relations](/spicedb/concepts/schema#subject-relations) +- Complex permission unions and intersections +- Caveat evaluation with different contexts +- Deeply nested organizational hierarchies + +### Validation Files + +Use validation files to: + +- Define expected permission outcomes +- Test your schema systematically +- Document authorization requirements +- Share test cases with your team + +Access with `validation://current` resource. + +### Saving Your Work + +The MCP server works with your AI assistant to help you save schemas and validation files to disk. + +#### Exporting Schemas + +Ask your assistant to save the current schema to a file: + +> You: "Save my schema to schema.zed" +> +> You: "Write the current schema to permissions/document-sharing.zed" + +The assistant reads from `schema://current` and writes the schema to your specified file path. + +#### Exporting Validation Files + +Save validation test cases to preserve your permission tests: + +> You: "Save the validation file to tests/permissions.yaml" +> +> You: "Export validation to document-tests.yaml" + +The assistant reads from `validation://current` and writes the validation YAML to your specified file. + +#### Loading Existing Files + +You can also load schemas from existing files: + +> You: "Load the schema from schema.zed and apply it to the dev server" +> +> You: "Read permissions/document-sharing.zed and write it to the MCP server" + +The assistant reads your file and uses `write_schema` to apply it to the development instance. + +## Security Considerations + +### Local Development Only + +**Never use in production**: + +- No authentication or authorization on the server itself +- In-memory only, no data persistence +- Designed for localhost access only +- No audit logging or compliance features + +### Network Isolation + +- Server binds to localhost (127.0.0.1) by default +- Do not expose port 9999 to external networks + +### Test Data Only + +- Use fictional data, never real user information +- Do not test with production credentials +- Avoid sensitive or confidential information +- Remember: all data is lost on shutdown + +### Development Hygiene + +- Track schemas in version control +- Document permission models separately +- Review schemas before production deployment +- Use validation files to capture test cases + +## Troubleshooting + +### Server Won't Start + +**Check Zed CLI installation**: + +```bash +zed version +``` + +**Port 9999 in use**: + +```bash +# macOS/Linux +lsof -i :9999 + +# Windows +netstat -an | findstr 9999 +``` + +**Solution**: Stop the conflicting process or restart your machine. + +### Client Connection Fails + +- Verify server is running: `zed mcp experimental-run` should be active +- Confirm URL is `http://localhost:9999/mcp` +- Check client supports HTTP transport +- Ensure localhost is not blocked by firewall + +### Schema Errors + +**Syntax errors**: Review against [SpiceDB schema documentation](/spicedb/modeling/developing-a-schema) + +**Undefined types**: Ensure all referenced types are defined in the schema + +**View detailed errors**: Access `validation://current` resource + +### Unexpected Permission Results + +**Debug process**: + +1. View current schema: `schema://current` +2. List all relationships: `relationships://all` +3. Check indirect permission paths +4. Use lookup tools to explore the authorization graph +5. Verify relation chains are correct + +### Data Loss + +**Remember**: The server is in-memory only. +All data is lost when stopped. + +**To preserve work**: + +- Save schemas to files regularly +- Export relationships for test cases +- Use validation files to document expected behavior +- Commit schemas to version control frequently + +## Moving to Production + +When ready to move beyond development: + +1. **Export your schema**: Save the final schema from `schema://current` + - Ask your assistant: "Save my schema to schema.zed" +2. **Document permissions**: Create comprehensive validation files + - Ask your assistant: "Save validation to tests/permissions.yaml" +3. **Deploy SpiceDB**: See [Deploying with SpiceDB Operator](/spicedb/ops/deploying-spicedb-operator) +4. **Connect your application**: Use [SpiceDB client libraries](/spicedb/getting-started/client-libraries) +5. **Import relationships**: Migrate test relationships if appropriate +6. **Test thoroughly**: Validate in staging before production + +The development server is not suitable for production use. +Deploy a proper SpiceDB instance with authentication, persistence, and monitoring. + +## Resources + +- [SpiceDB Documentation](/spicedb/getting-started/discovering-spicedb) +- [Schema Language Reference](/spicedb/modeling/developing-a-schema) +- [Zed CLI Documentation](/spicedb/getting-started/installing-zed) +- [SpiceDB Client Libraries](/spicedb/getting-started/client-libraries) +- [Validation Testing Guide](/spicedb/modeling/validation-testing-debugging) diff --git a/content/mcp/index.mdx b/content/mcp/index.mdx new file mode 100644 index 0000000..0947aff --- /dev/null +++ b/content/mcp/index.mdx @@ -0,0 +1,120 @@ +# Model Context Protocol + +## Overview + +Model Context Protocol (MCP) is an open standard that enables large language models (LLMs) to communicate with external tools and data sources. +Often referred to as "USB-C for AI", MCP provides a universal interface that allows AI applications to integrate once and interoperate with any MCP-compatible system. + +By standardizing how AI tools access external data and functionality, MCP eliminates the need for custom integrations between every AI application and every external service. + +- [Latest Version MCP Specification](https://modelcontextprotocol.io/specification/latest) +- Try the [AuthZed MCP Server](https://mcp.authzed.com) + +## Understanding MCP + +### The Problem MCP Solves + +LLMs face fundamental limitations when providing contextually relevant responses: + +- **Limited knowledge cutoff**: Models are trained on data up to a specific point in time and lack access to real-time or current information +- **No external data access**: LLMs cannot natively access databases, APIs, or external systems +- **Manual context augmentation**: Previously, users had to manually copy and paste relevant data into chat interfaces +- **Integration complexity**: AI coding assistants required custom-built integrations with databases and backend systems for each deployment + +### How MCP Works + +The MCP specification standardizes how AI tools interact with data sources and functionality. +Instead of building separate integrations for each AI application and external service, developers implement MCP once and gain compatibility across the entire ecosystem. + +MCP uses a client-server architecture: + +- **MCP Host**: The AI application that users interact with (IDEs like Cursor or Windsurf, chat applications like ChatGPT or Claude, AI agents) +- **MCP Client**: The connection component within the host that communicates with external services +- **MCP Server**: The external service being accessed (databases, APIs, cloud services) + +Each MCP host must create and manage separate MCP client connections for each MCP server it communicates with. +This architecture enables AI applications to access multiple data sources and tools simultaneously while maintaining clear separation between services. + +## Core Capabilities + +MCP provides three primitives for extending LLM functionality: + +### Resources + +Resources inject information into the AI's context. +This includes configuration data, documentation, company policies, product catalogs, or customer information. +Resources enable AI models to work with accurate, current data without manual intervention. + +**Example use case**: When drafting a customer email, automatically provide the customer's order history, support ticket details, and account preferences so the AI can write personalized, contextually accurate responses. + +### Tools + +Tools enable AI models to trigger actions on behalf of users based on goals and information in the context. +They allow AI to interact with external systems, automating tasks that would otherwise require manual execution across multiple applications. + +**Example use case**: Create a new project in your task management system, assign team members, set deadlines, and generate initial task lists—all from a single conversation with the AI, without switching between applications. + +### Prompts + +Prompts provide tested, reusable instructions that guide AI behavior consistently. +Instead of users needing to craft precise instructions each time, MCP servers can supply pre-configured prompts that have been refined for specific tasks or contexts. + +**Example use case**: When analyzing customer feedback, use a standardized prompt that ensures the AI always considers sentiment, key themes, and actionable insights in a consistent format across your team. + +## Security and Authorization + +### Evolution of the Specification + +The MCP specification has evolved rapidly since its introduction, with three major releases in under eight months: + +- **2024-11-05**: Initial specification release +- **2025-03-26**: Enhanced security features +- **2025-06-18**: Enterprise readiness improvements + +The most recent versions have focused heavily on security and enterprise readiness, introducing mechanisms to authenticate users and clients while providing recommendations for authorizing resource access. +The ability to implement granular access controls for resources is especially critical for enterprises integrating sensitive company and user data with MCP servers. + +### Deployment Considerations + +MCP servers can be deployed in two primary configurations, each with distinct authorization requirements: + +#### Local MCP Servers + +Local servers run as single instances on individual machines. +These servers are assumed to be under the custodian of the user running them. +Most MCP clients provide functionality that prompts users to approve tool invocations and resource access before execution. +While this provides a basic security layer, it relies on user vigilance and awareness. + +#### Remote MCP Servers + +Remote servers are hosted and accessed in multi-tenant environments, serving multiple users and organizations. +These deployments require robust authentication and access control mechanisms for MCP resources. +The MCP specification provides high-level guidance for authorization, but implementation details—including specific permission models and accurate enforcement—are the responsibility of MCP server developers. + +### Security Risks: The Lethal Trifecta + +Security researcher Simon Willison identified a [dangerous combination of capabilities](https://simonwillison.net/2025/Jun/16/the-lethal-trifecta/) that can lead to data theft in AI systems: + +1. **Access to your private data**: One of the most common purposes of tools in the first place +2. **Exposure to untrusted content**: Any mechanism by which text or images controlled by a malicious attacker could become available to your LLM +3. **The ability to externally communicate**: Methods that could be used to exfiltrate your data + +Implementing robust authorization in your MCP server can mitigate these risks within your service. +However, once data from your MCP server is sent to the MCP host application and becomes part of the context, you lose control over access to that data. +AI applications often have multiple MCP servers enabled simultaneously, and you cannot enforce permissions for actions taken on your data within other servers or the host application itself. + +### Best Practices + +This limitation underscores the importance of: + +- Implementing least-privilege access controls +- Carefully evaluating which data to expose through MCP resources +- Understanding the trust boundaries between your MCP server and the host application +- Considering data sensitivity when designing your MCP server's capabilities + +## AuthZed MCP Resources + +AuthZed provides official MCP server implementations and reference architectures for using SpiceDB and AuthZed to build authorization-aware MCP servers: + +- **Official MCP Server**: [https://mcp.authzed.com](https://mcp.authzed.com) +- **Reference Implementation**: [https://github.com/authzed/mcp-server-reference](https://github.com/authzed/mcp-server-reference) diff --git a/content/spicedb/_meta.ts b/content/spicedb/_meta.ts new file mode 100644 index 0000000..1dac4ac --- /dev/null +++ b/content/spicedb/_meta.ts @@ -0,0 +1,10 @@ +import type { MetaRecord } from 'nextra' + +export default { + "getting-started": "Getting Started", + concepts: "Concepts", + modeling: "Modeling & Integrating", + ops: "Operations", + api: "API Reference", + links: "Links", +} satisfies MetaRecord; diff --git a/content/spicedb/api/_meta.ts b/content/spicedb/api/_meta.ts new file mode 100644 index 0000000..ece9264 --- /dev/null +++ b/content/spicedb/api/_meta.ts @@ -0,0 +1,15 @@ +import type { MetaRecord } from 'nextra' + +export default { + "grpc-api": { + title: "gRPC API Reference", + href: "https://buf.build/authzed/api/docs/main:authzed.api.v1", + }, + "http-api": { + title: "HTTP API Reference", + }, + postman: { + title: "Postman Collection", + href: "https://www.postman.com/authzed/workspace/spicedb/overview", + }, +} satisfies MetaRecord; diff --git a/content/spicedb/api/http-api.mdx b/content/spicedb/api/http-api.mdx new file mode 100644 index 0000000..5304c61 --- /dev/null +++ b/content/spicedb/api/http-api.mdx @@ -0,0 +1,5 @@ +import { Swagger } from "../../../components/swagger"; + +# HTTP API Documentation + + diff --git a/content/spicedb/concepts/_meta.ts b/content/spicedb/concepts/_meta.ts new file mode 100644 index 0000000..2302aae --- /dev/null +++ b/content/spicedb/concepts/_meta.ts @@ -0,0 +1,15 @@ +import type { MetaRecord } from 'nextra' + +export default { + zanzibar: "Google Zanzibar", + schema: "Schema Language Reference", + relationships: "Writing Relationships", + caveats: "Writing Relationships with Caveats", + "expiring-relationships": "Writing Relationships that Expire", + commands: "SpiceDB Commands & Parameters", + consistency: "Consistency", + datastores: "Datastores", + "datastore-migrations": "Datastore Migrations", + "reflection-apis": "Reflection APIs", + watch: "Watching Changes", +} satisfies MetaRecord; diff --git a/content/spicedb/concepts/caveats.mdx b/content/spicedb/concepts/caveats.mdx new file mode 100644 index 0000000..7129ee2 --- /dev/null +++ b/content/spicedb/concepts/caveats.mdx @@ -0,0 +1,326 @@ +import { Callout } from "nextra/components"; + +# Caveats + +Caveats are expressions that can return true or false, and they can be attached (by name) to relationships. + +They allow relationships to be defined conditionally: when executing permission checks (e.g. [CheckPermission]), the caveated relationship will only be considered present if the caveat expression evaluates to true at the time you run the `CheckPermission`. + +[CheckPermission]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.CheckPermission + +Caveats allow for an elegant way to model dynamic policies and ABAC-style (Attribute Based Access Control) decisions while still providing scalability and performance guarantees. + +## Defining Caveats + +Caveats are named expressions that are defined in the [schema](./schema) alongside definitions for object types. +A caveat definition includes a name, one or more well-typed parameters, and a [CEL expression] returning a boolean value. + +[CEL expression]: https://github.com/google/cel-spec + +Here's schema snippet demonstrating what a simple caveat looks like: + +```zed +caveat first_caveat(first_parameter int, second_parameter string) { + first_parameter == 42 && second_parameter == "hello world" +} +``` + +### Parameter Types + +The following table documents the CEL types available for values in caveat expressions: + +| Type | Description | +| ----------- | ------------------------------------------------ | +| `any` | any value is allowed; useful for types that vary | +| `int` | 64-bit signed integer | +| `uint` | 64-bit unsigned integer | +| `bool` | boolean | +| `string` | utf8-encoded string | +| `double` | double-width floating point number | +| `bytes` | sequence of uint8 | +| `duration` | duration of time | +| `timestamp` | specific moment in time (typically UTC) | +| `list` | generic sequence of values | +| `map` | generic mapping of strings to values | +| `ipaddress` | spicedb-specific type for IP addresses | + +Developers looking for the SpiceDB code that defines of these types can find them in the [pkg/caveats/types module]. + +[pkg/caveats/types module]: https://github.com/authzed/spicedb/blob/main/pkg/caveats/types + +### Some Examples + +#### Basic comparison + +```zed +caveat is_tuesday(today string) { + today == 'tuesday' +} +``` + +#### Attribute Matching + +The example below defines a caveat that requires that any expected attributes found within the expected map are a subset of the attributes in the provided map: + +```zed +caveat attributes_match(expected map, provided map) { + expected.isSubtreeOf(provided) +} +``` + +#### IP address checking + +The example below defines a caveat that requires that a user’s IP address is within a specific CIDR range: + +```zed +caveat ip_allowlist(user_ip ipaddress, cidr string) { + user_ip.in_cidr(cidr) +} +``` + +## Allowing caveats on relations + +To allow a caveat to be used when writing a relationship, the caveat must be specified on the relation within the schema via the **with** keyword: + +```zed +definition resource { + relation viewer: user | user with ip_allowlist +} +``` + +In the above example, a relationship can be written for the `viewer` relation to a `user` without a caveat OR with the `ip_allowlist` caveat. + +To make the caveat **required**, the `user |` can be removed. + +## Writing relationships with caveats and context + +When writing a relationship for a relation, both the caveat and a portion of the “context” can be specified: + +```textproto +WriteRelationshipsRequest { + Updates: [ + RelationshipUpdate{ + Operation: CREATE + Relationship: { + Resource: …, + Relation: "viewer", + Subject: …, + OptionalCaveat: { + CaveatName: "ip_allowlist", + Context: structpb{ "cidr": "1.2.3.0/24" } + } + } + } + ] +} +``` + +A few important notes: + +- The **Context** of a caveat is defined both by the values written in the `Relationship`, as well as those provided in the `CheckPermissionRequest`: if empty, then only the context specified on a CheckPermission request will be used. + Otherwise, the values in the `Relationship` take precedence over those in the `CheckPermissionRequest`. + - Context of a caveat provided in `Relationship` is stored alongside the relationship and is provided to the caveat expression at runtime. + This allows for **partial** binding of data at write time. +- The Context is a `structpb`, which is defined by Google [and represents JSON-like data](https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb). + - To send 64-bit integers, encode them as strings. +- A relationship cannot be duplicated, with or without a caveat; i.e. two relationships that differ only on their use of a caveat cannot both exist. +- When deleting a relationship, a caveat does not need to be specified; the matching relationship will be deleted if present. + +## Providing Caveat Context via the API + +### `CheckPermission` + +When issuing a [CheckPermission request][check-req], additional caveat context can be specified to represent the known context at the time of the check: + +```textproto +CheckPermissionRequest { + resource: { + object_type: "book", + object_id: "specificbook", + }, + permission: "view", + subject: { + object: { + object_type: "user", + object_id: "specificuser", + }, + }, + context: { "user_ip": "1.2.3.4" } +} +``` + +The check engine will automatically apply the context found on the relationships, as well as the context provided by the CheckPermission call, and return [one of three states][states]: + +- `PERMISSIONSHIP_NO_PERMISSION` - subject does not have the permission on the resource +- `PERMISSIONSHIP_HAS_PERMISSION` - subject has permission on the resource +- `PERMISSIONSHIP_CONDITIONAL_PERMISSION` - required context is missing to determine permissionship + +In the case of `PERMISSIONSHIP_CONDITIONAL_PERMISSION`, SpiceDB will also return the missing context fields in the [CheckPermissionResponse][check-resp] so the caller knows what additional context to fill in if they wish to rerun the check and get a determined answer. + +[check-req]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.CheckPermission +[states]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.CheckPermissionResponse.Permissionship +[check-resp]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.CheckPermissionResponse + +### `LookupResources` and `LookupSubjects` + +Similarly to **CheckPermission**, both **LookupResources** and **LookupSubjects** can be provided with additional context and will return one of the two permission states for each of the results found (either has permission or conditionally has permission). + +```textproto +LookupResourcesRequest { + resource_object_type: "book", + permission: "view", + subject: { + object: { + object_type: "user", + object_id: "specificuser", + }, + }, + context: { "user_ip": "1.2.3.4" } +} +``` + +## Providing Caveat Context with `zed CLI` + +When using `zed` command-line tool to interact with SpiceDB, the context can be provided using the `--caveat-context` flag. +The caveat context should be a JSON representation that matches the types defined in the schema. +For example, with the following caveat: + +```zed +caveat first_caveat(first_parameter int, second_parameter string) { + first_parameter == 42 && second_parameter == "hello world" +} +``` + +We would need to forward a JSON object like: + +```json +{ + "first_parameter": 42, + "second_parameter": "hello world" +} +``` + +The full command would look like: + +```shell +zed check -r resource:specificresource#view -p view -s user:specificuser --caveat-context '{"first_parameter": 42, "second_parameter": "hello world"}' +``` + + + Please note the use of single quotes to escape the characters in the JSON + representation of the context. You don't need character escaping when + providing context using zed in the Authzed Playground. + + +## Full Example + +A full example of a schema with caveats can be found below, which allows users to `view` a resource if they are directly a `viewer` or if they are a `viewer` within the correct IP CIDR range: + +### Schema + +```zed +definition user {} + +caveat has_valid_ip(user_ip ipaddress, allowed_range string) { + user_ip.in_cidr(allowed_range) +} + +definition resource { + relation viewer: user | user with has_valid_ip + permission view = viewer +} +``` + +### Write Relationships + +```textproto +WriteRelationshipsRequest { + Updates: [ + RelationshipUpdate{ + Operation: CREATE + Relationship: { + Resource: { + ObjectType: "resource", + ObjectId: "someresource", + }, + Relation: "viewer", + Subject: { + ObjectType: "user", + ObjectId: "sarah", + }, + OptionalCaveat: { + CaveatName: "has_valid_ip", + Context: structpb{ "allowed_range": "10.20.30.0/24" } + } + } + } + ] +} +``` + +### Check Permission + +```textproto +CheckPermissionRequest { + Resource: { + ObjectType: "resource", + ObjectId: "someresource", + }, + Permission: "view", + Subject: { + ObjectType: "user", + ObjectId: "sarah", + }, + Context: { "user_ip": "10.20.30.42" } +} +``` + +## Validation with Caveats + +The [Assertions] and [Expected Relations] definitions for validation of schema support caveats as well. + +[assertions]: /spicedb/modeling/developing-a-schema#assertions +[expected relations]: /spicedb/modeling/developing-a-schema#expected-relations + +### Assertions + +Caveated permissions can be checked in assertions by the addition of the `assertCaveated` block: + +```yaml filename="Assertions for caveated permissions" +assertTrue: + - "document:specificdocument#reader@user:specificuser" +assertCaveated: + - "document:specificdocument#reader@user:caveateduser" +assertFalse: + - "document:specificdocument#reader@user:anotheruser" +``` + +To assert that a permission does or does not exist when some context it specified, the `with` keyword can be used to provide the context: + +```yaml filename="Assertions for caveated permissions with context" +assertTrue: + - "document:specificdocument#reader@user:specificuser" + - 'document:specificdocument#reader@user:caveateduser with {"somecondition": true}' +assertCaveated: + - "document:specificdocument#reader@user:caveateduser" +assertFalse: + - "document:specificdocument#reader@user:anotheruser" + - 'document:specificdocument#reader@user:caveateduser with {"somecondition": false}' +``` + +### Expected Relations + +Expected relations notes if a subject is caveated via the inclusion of the `[...]` string on the end of the subject: + +```yaml filename="Expected Relations with caveats" +document:specificdocument#view: + - "[user:specificuser] is " + - "[user:caveateduser[...]] might be " +``` + + + Expected Relations does **not** evaluate caveats, even if the necessary context is fully specified on the relationship. + +This means that a caveated subject that might actually return `HAS_PERMISSION` will appear as `subject[...]` in expected relations + + diff --git a/content/spicedb/concepts/commands.mdx b/content/spicedb/concepts/commands.mdx new file mode 100644 index 0000000..532e6a5 --- /dev/null +++ b/content/spicedb/concepts/commands.mdx @@ -0,0 +1,589 @@ +## Reference: `spicedb` + +A database that stores, computes, and validates application permissions + +### Examples + +``` + No TLS and in-memory: + spicedb serve --grpc-preshared-key "somerandomkeyhere" + + TLS and a real datastore: + spicedb serve --grpc-preshared-key "realkeyhere" --grpc-tls-cert-path path/to/tls/cert --grpc-tls-key-path path/to/tls/key \ + --http-tls-cert-path path/to/tls/cert --http-tls-key-path path/to/tls/key \ + --datastore-engine postgres --datastore-conn-uri "postgres-connection-string-here" + +``` + +### Options + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +### Children commands + +- [spicedb datastore](#reference-spicedb-datastore) - datastore operations +- [spicedb lsp](#reference-spicedb-lsp) - serve language server protocol +- [spicedb serve](#reference-spicedb-serve) - serve the permissions database +- [spicedb serve-testing](#reference-spicedb-serve-testing) - test server with an in-memory datastore +- [spicedb version](#reference-spicedb-version) - displays the version of SpiceDB + +## Reference: `spicedb datastore` + +Operations against the configured datastore + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +### Children commands + +- [spicedb datastore gc](#reference-spicedb-datastore-gc) - executes garbage collection +- [spicedb datastore head](#reference-spicedb-datastore-head) - compute the head database migration revision +- [spicedb datastore migrate](#reference-spicedb-datastore-migrate) - execute datastore schema migrations +- [spicedb datastore repair](#reference-spicedb-datastore-repair) - executes datastore repair + +## Reference: `spicedb datastore gc` + +Executes garbage collection against the datastore + +``` +spicedb datastore gc [flags] +``` + +### Options + +``` + --datastore-allowed-migrations stringArray migration levels that will not fail the health check (in addition to the current head migration) + --datastore-bootstrap-files strings bootstrap data yaml files to load + --datastore-bootstrap-overwrite overwrite any existing data with bootstrap data (this can be quite slow) + --datastore-bootstrap-timeout duration maximum duration before timeout for the bootstrap data to be written (default 10s) + --datastore-conn-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-read-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-conn-pool-read-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-read-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-read-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-read-max-open int number of concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-conn-pool-read-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-conn-pool-write-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-conn-pool-write-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-write-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-write-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-write-max-open int number of concurrent connections open in a remote datastore's connection pool (default 10) + --datastore-conn-pool-write-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 10) + --datastore-conn-uri string connection string used by remote datastores (e.g. "postgres://postgres:password@localhost:5432/spicedb") + --datastore-connect-rate duration rate at which new connections are allowed to the datastore (at a rate of 1/duration) (cockroach driver only) (default 100ms) + --datastore-connection-balancing enable connection balancing between database nodes (cockroach driver only) (default true) + --datastore-credentials-provider-name string retrieve datastore credentials dynamically using ("aws-iam") + --datastore-disable-watch-support disable watch support (only enable if you absolutely do not need watch) + --datastore-engine string type of datastore to initialize ("cockroachdb", "mysql", "postgres", "spanner") (default "memory") + --datastore-experimental-column-optimization enable experimental column optimization (default true) + --datastore-follower-read-delay-duration duration amount of time to subtract from non-sync revision timestamps to ensure they are sufficiently in the past to enable follower reads (cockroach and spanner drivers only) or read replicas (postgres and mysql drivers only) (default 4.8s) + --datastore-gc-interval duration amount of time between passes of garbage collection (postgres driver only) (default 3m0s) + --datastore-gc-max-operation-time duration maximum amount of time a garbage collection pass can operate before timing out (postgres driver only) (default 1m0s) + --datastore-gc-window duration amount of time before revisions are garbage collected (default 24h0m0s) + --datastore-include-query-parameters-in-traces include query parameters in traces (postgres and CRDB drivers only) + --datastore-max-tx-retries int number of times a retriable transaction should be retried (default 10) + --datastore-migration-phase string datastore-specific flag that should be used to signal to a datastore which phase of a multi-step migration it is in + --datastore-mysql-table-prefix string prefix to add to the name of all SpiceDB database tables + --datastore-prometheus-metrics set to false to disabled metrics from the datastore (do not use for Spanner; setting to false will disable metrics to the configured metrics store in Spanner) (default true) + --datastore-read-replica-conn-pool-read-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-read-replica-conn-pool-read-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-read-replica-conn-pool-read-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-read-replica-conn-pool-read-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-read-replica-conn-pool-read-max-open int number of concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-read-replica-conn-pool-read-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-read-replica-conn-uri stringArray connection string used by remote datastores for read replicas (e.g. "postgres://postgres:password@localhost:5432/spicedb"). Only supported for postgres and mysql. + --datastore-read-replica-credentials-provider-name string retrieve datastore credentials dynamically using ("aws-iam") + --datastore-readonly set the service to read-only mode + --datastore-relationship-integrity-current-key-filename string current key filename for relationship integrity checks + --datastore-relationship-integrity-current-key-id string current key id for relationship integrity checks + --datastore-relationship-integrity-enabled enables relationship integrity checks. only supported on CRDB + --datastore-relationship-integrity-expired-keys stringArray config for expired keys for relationship integrity checks + --datastore-request-hedging enable request hedging + --datastore-request-hedging-initial-slow-value duration initial value to use for slow datastore requests, before statistics have been collected (default 10ms) + --datastore-request-hedging-max-requests uint maximum number of historical requests to consider (default 1000000) + --datastore-request-hedging-quantile float quantile of historical datastore request time over which a request will be considered slow (default 0.95) + --datastore-revision-quantization-interval duration boundary interval to which to round the quantized revision (default 5s) + --datastore-revision-quantization-max-staleness-percent float float percentage (where 1 = 100%) of the revision quantization interval where we may opt to select a stale revision for performance reasons. Defaults to 0.1 (representing 10%) (default 0.1) + --datastore-spanner-credentials string path to service account key credentials file with access to the cloud spanner instance (omit to use application default credentials) + --datastore-spanner-emulator-host string URI of spanner emulator instance used for development and testing (e.g. localhost:9010) + --datastore-spanner-max-sessions uint maximum number of sessions across all Spanner gRPC connections the client can have at a given time (default 400) + --datastore-spanner-metrics string configure the metrics that are emitted by the Spanner datastore ("none", "native", "otel", "deprecated-prometheus") (default "otel") + --datastore-spanner-min-sessions uint minimum number of sessions across all Spanner gRPC connections the client can have at a given time (default 100) + --datastore-tx-overlap-key string static key to touch when writing to ensure transactions overlap (only used if --datastore-tx-overlap-strategy=static is set; cockroach driver only) (default "key") + --datastore-tx-overlap-strategy string strategy to generate transaction overlap keys ("request", "prefix", "static", "insecure") (cockroach driver only - see https://spicedb.dev/d/crdb-overlap for details)" (default "static") + --datastore-watch-buffer-length uint16 how large the watch buffer should be before blocking (default 1024) + --datastore-watch-buffer-write-timeout duration how long the watch buffer should queue before forcefully disconnecting the reader (default 1s) + --datastore-watch-connect-timeout duration how long the watch connection should wait before timing out (cockroachdb driver only) (default 1s) + --otel-endpoint string OpenTelemetry collector endpoint - the endpoint can also be set by using enviroment variables + --otel-insecure connect to the OpenTelemetry collector in plaintext + --otel-provider string OpenTelemetry provider for tracing ("none", "otlphttp", "otlpgrpc") (default "none") + --otel-sample-ratio float ratio of traces that are sampled (default 0.01) + --otel-service-name string service name for trace data (default "spicedb") + --otel-trace-propagator string OpenTelemetry trace propagation format ("b3", "w3c", "ottrace"). Add multiple propagators separated by comma. (default "w3c") + --pprof-block-profile-rate int sets the block profile sampling rate + --pprof-mutex-profile-rate int sets the mutex profile sampling rate + --termination-log-path string define the path to the termination log file, which contains a JSON payload to surface as reason for termination - disabled by default +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +## Reference: `spicedb datastore head` + +compute the head database migration revision + +``` +spicedb datastore head [flags] +``` + +### Options + +``` + --datastore-engine string type of datastore to initialize ("cockroachdb", "mysql", "postgres", "spanner") (default "postgres") + --otel-endpoint string OpenTelemetry collector endpoint - the endpoint can also be set by using enviroment variables + --otel-insecure connect to the OpenTelemetry collector in plaintext + --otel-provider string OpenTelemetry provider for tracing ("none", "otlphttp", "otlpgrpc") (default "none") + --otel-sample-ratio float ratio of traces that are sampled (default 0.01) + --otel-service-name string service name for trace data (default "spicedb") + --otel-trace-propagator string OpenTelemetry trace propagation format ("b3", "w3c", "ottrace"). Add multiple propagators separated by comma. (default "w3c") + --pprof-block-profile-rate int sets the block profile sampling rate + --pprof-mutex-profile-rate int sets the mutex profile sampling rate + --termination-log-path string define the path to the termination log file, which contains a JSON payload to surface as reason for termination - disabled by default +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +## Reference: `spicedb datastore migrate` + +Executes datastore schema migrations for the datastore. +The special value "head" can be used to migrate to the latest revision. + +``` +spicedb datastore migrate [revision] [flags] +``` + +### Options + +``` + --datastore-conn-uri string connection string used by remote datastores (e.g. "postgres://postgres:password@localhost:5432/spicedb") + --datastore-credentials-provider-name string retrieve datastore credentials dynamically using ("aws-iam") + --datastore-engine string type of datastore to initialize ("cockroachdb", "mysql", "postgres", "spanner") (default "memory") + --datastore-mysql-table-prefix string prefix to add to the name of all mysql database tables + --datastore-spanner-credentials string path to service account key credentials file with access to the cloud spanner instance (omit to use application default credentials) + --datastore-spanner-emulator-host string URI of spanner emulator instance used for development and testing (e.g. localhost:9010) + --migration-backfill-batch-size uint number of items to migrate per iteration of a datastore backfill (default 1000) + --migration-timeout duration defines a timeout for the execution of the migration, set to 1 hour by default (default 1h0m0s) + --otel-endpoint string OpenTelemetry collector endpoint - the endpoint can also be set by using enviroment variables + --otel-insecure connect to the OpenTelemetry collector in plaintext + --otel-provider string OpenTelemetry provider for tracing ("none", "otlphttp", "otlpgrpc") (default "none") + --otel-sample-ratio float ratio of traces that are sampled (default 0.01) + --otel-service-name string service name for trace data (default "spicedb") + --otel-trace-propagator string OpenTelemetry trace propagation format ("b3", "w3c", "ottrace"). Add multiple propagators separated by comma. (default "w3c") + --pprof-block-profile-rate int sets the block profile sampling rate + --pprof-mutex-profile-rate int sets the mutex profile sampling rate + --termination-log-path string define the path to the termination log file, which contains a JSON payload to surface as reason for termination - disabled by default +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +## Reference: `spicedb datastore repair` + +Executes a repair operation for the datastore + +``` +spicedb datastore repair [flags] +``` + +### Options + +``` + --datastore-allowed-migrations stringArray migration levels that will not fail the health check (in addition to the current head migration) + --datastore-bootstrap-files strings bootstrap data yaml files to load + --datastore-bootstrap-overwrite overwrite any existing data with bootstrap data (this can be quite slow) + --datastore-bootstrap-timeout duration maximum duration before timeout for the bootstrap data to be written (default 10s) + --datastore-conn-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-read-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-conn-pool-read-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-read-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-read-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-read-max-open int number of concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-conn-pool-read-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-conn-pool-write-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-conn-pool-write-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-write-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-write-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-write-max-open int number of concurrent connections open in a remote datastore's connection pool (default 10) + --datastore-conn-pool-write-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 10) + --datastore-conn-uri string connection string used by remote datastores (e.g. "postgres://postgres:password@localhost:5432/spicedb") + --datastore-connect-rate duration rate at which new connections are allowed to the datastore (at a rate of 1/duration) (cockroach driver only) (default 100ms) + --datastore-connection-balancing enable connection balancing between database nodes (cockroach driver only) (default true) + --datastore-credentials-provider-name string retrieve datastore credentials dynamically using ("aws-iam") + --datastore-disable-watch-support disable watch support (only enable if you absolutely do not need watch) + --datastore-engine string type of datastore to initialize ("cockroachdb", "mysql", "postgres", "spanner") (default "memory") + --datastore-experimental-column-optimization enable experimental column optimization (default true) + --datastore-follower-read-delay-duration duration amount of time to subtract from non-sync revision timestamps to ensure they are sufficiently in the past to enable follower reads (cockroach and spanner drivers only) or read replicas (postgres and mysql drivers only) (default 4.8s) + --datastore-gc-interval duration amount of time between passes of garbage collection (postgres driver only) (default 3m0s) + --datastore-gc-max-operation-time duration maximum amount of time a garbage collection pass can operate before timing out (postgres driver only) (default 1m0s) + --datastore-gc-window duration amount of time before revisions are garbage collected (default 24h0m0s) + --datastore-include-query-parameters-in-traces include query parameters in traces (postgres and CRDB drivers only) + --datastore-max-tx-retries int number of times a retriable transaction should be retried (default 10) + --datastore-migration-phase string datastore-specific flag that should be used to signal to a datastore which phase of a multi-step migration it is in + --datastore-mysql-table-prefix string prefix to add to the name of all SpiceDB database tables + --datastore-prometheus-metrics set to false to disabled metrics from the datastore (do not use for Spanner; setting to false will disable metrics to the configured metrics store in Spanner) (default true) + --datastore-read-replica-conn-pool-read-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-read-replica-conn-pool-read-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-read-replica-conn-pool-read-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-read-replica-conn-pool-read-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-read-replica-conn-pool-read-max-open int number of concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-read-replica-conn-pool-read-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-read-replica-conn-uri stringArray connection string used by remote datastores for read replicas (e.g. "postgres://postgres:password@localhost:5432/spicedb"). Only supported for postgres and mysql. + --datastore-read-replica-credentials-provider-name string retrieve datastore credentials dynamically using ("aws-iam") + --datastore-readonly set the service to read-only mode + --datastore-relationship-integrity-current-key-filename string current key filename for relationship integrity checks + --datastore-relationship-integrity-current-key-id string current key id for relationship integrity checks + --datastore-relationship-integrity-enabled enables relationship integrity checks. only supported on CRDB + --datastore-relationship-integrity-expired-keys stringArray config for expired keys for relationship integrity checks + --datastore-request-hedging enable request hedging + --datastore-request-hedging-initial-slow-value duration initial value to use for slow datastore requests, before statistics have been collected (default 10ms) + --datastore-request-hedging-max-requests uint maximum number of historical requests to consider (default 1000000) + --datastore-request-hedging-quantile float quantile of historical datastore request time over which a request will be considered slow (default 0.95) + --datastore-revision-quantization-interval duration boundary interval to which to round the quantized revision (default 5s) + --datastore-revision-quantization-max-staleness-percent float float percentage (where 1 = 100%) of the revision quantization interval where we may opt to select a stale revision for performance reasons. Defaults to 0.1 (representing 10%) (default 0.1) + --datastore-spanner-credentials string path to service account key credentials file with access to the cloud spanner instance (omit to use application default credentials) + --datastore-spanner-emulator-host string URI of spanner emulator instance used for development and testing (e.g. localhost:9010) + --datastore-spanner-max-sessions uint maximum number of sessions across all Spanner gRPC connections the client can have at a given time (default 400) + --datastore-spanner-metrics string configure the metrics that are emitted by the Spanner datastore ("none", "native", "otel", "deprecated-prometheus") (default "otel") + --datastore-spanner-min-sessions uint minimum number of sessions across all Spanner gRPC connections the client can have at a given time (default 100) + --datastore-tx-overlap-key string static key to touch when writing to ensure transactions overlap (only used if --datastore-tx-overlap-strategy=static is set; cockroach driver only) (default "key") + --datastore-tx-overlap-strategy string strategy to generate transaction overlap keys ("request", "prefix", "static", "insecure") (cockroach driver only - see https://spicedb.dev/d/crdb-overlap for details)" (default "static") + --datastore-watch-buffer-length uint16 how large the watch buffer should be before blocking (default 1024) + --datastore-watch-buffer-write-timeout duration how long the watch buffer should queue before forcefully disconnecting the reader (default 1s) + --datastore-watch-connect-timeout duration how long the watch connection should wait before timing out (cockroachdb driver only) (default 1s) + --otel-endpoint string OpenTelemetry collector endpoint - the endpoint can also be set by using enviroment variables + --otel-insecure connect to the OpenTelemetry collector in plaintext + --otel-provider string OpenTelemetry provider for tracing ("none", "otlphttp", "otlpgrpc") (default "none") + --otel-sample-ratio float ratio of traces that are sampled (default 0.01) + --otel-service-name string service name for trace data (default "spicedb") + --otel-trace-propagator string OpenTelemetry trace propagation format ("b3", "w3c", "ottrace"). Add multiple propagators separated by comma. (default "w3c") + --pprof-block-profile-rate int sets the block profile sampling rate + --pprof-mutex-profile-rate int sets the mutex profile sampling rate + --termination-log-path string define the path to the termination log file, which contains a JSON payload to surface as reason for termination - disabled by default +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +## Reference: `spicedb lsp` + +serve language server protocol + +``` +spicedb lsp [flags] +``` + +### Options + +``` + --addr string address to listen on to serve LSP (default "-") + --stdio enable stdio mode for LSP (default true) +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +## Reference: `spicedb serve` + +A database that stores, computes, and validates application permissions + +``` +spicedb serve [flags] +``` + +### Examples + +``` + No TLS and in-memory: + spicedb serve --grpc-preshared-key "somerandomkeyhere" + + TLS and a real datastore: + spicedb serve --grpc-preshared-key "realkeyhere" --grpc-tls-cert-path path/to/tls/cert --grpc-tls-key-path path/to/tls/key \ + --http-tls-cert-path path/to/tls/cert --http-tls-key-path path/to/tls/key \ + --datastore-engine postgres --datastore-conn-uri "postgres-connection-string-here" + +``` + +### Options + +``` + --datastore-allowed-migrations stringArray migration levels that will not fail the health check (in addition to the current head migration) + --datastore-bootstrap-files strings bootstrap data yaml files to load + --datastore-bootstrap-overwrite overwrite any existing data with bootstrap data (this can be quite slow) + --datastore-bootstrap-timeout duration maximum duration before timeout for the bootstrap data to be written (default 10s) + --datastore-conn-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-read-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-conn-pool-read-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-read-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-read-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-read-max-open int number of concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-conn-pool-read-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-conn-pool-write-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-conn-pool-write-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-write-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-conn-pool-write-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-conn-pool-write-max-open int number of concurrent connections open in a remote datastore's connection pool (default 10) + --datastore-conn-pool-write-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 10) + --datastore-conn-uri string connection string used by remote datastores (e.g. "postgres://postgres:password@localhost:5432/spicedb") + --datastore-connect-rate duration rate at which new connections are allowed to the datastore (at a rate of 1/duration) (cockroach driver only) (default 100ms) + --datastore-connection-balancing enable connection balancing between database nodes (cockroach driver only) (default true) + --datastore-credentials-provider-name string retrieve datastore credentials dynamically using ("aws-iam") + --datastore-disable-watch-support disable watch support (only enable if you absolutely do not need watch) + --datastore-engine string type of datastore to initialize ("cockroachdb", "mysql", "postgres", "spanner") (default "memory") + --datastore-experimental-column-optimization enable experimental column optimization (default true) + --datastore-follower-read-delay-duration duration amount of time to subtract from non-sync revision timestamps to ensure they are sufficiently in the past to enable follower reads (cockroach and spanner drivers only) or read replicas (postgres and mysql drivers only) (default 4.8s) + --datastore-gc-interval duration amount of time between passes of garbage collection (postgres driver only) (default 3m0s) + --datastore-gc-max-operation-time duration maximum amount of time a garbage collection pass can operate before timing out (postgres driver only) (default 1m0s) + --datastore-gc-window duration amount of time before revisions are garbage collected (default 24h0m0s) + --datastore-include-query-parameters-in-traces include query parameters in traces (postgres and CRDB drivers only) + --datastore-max-tx-retries int number of times a retriable transaction should be retried (default 10) + --datastore-migration-phase string datastore-specific flag that should be used to signal to a datastore which phase of a multi-step migration it is in + --datastore-mysql-table-prefix string prefix to add to the name of all SpiceDB database tables + --datastore-prometheus-metrics set to false to disabled metrics from the datastore (do not use for Spanner; setting to false will disable metrics to the configured metrics store in Spanner) (default true) + --datastore-read-replica-conn-pool-read-healthcheck-interval duration amount of time between connection health checks in a remote datastore's connection pool (default 30s) + --datastore-read-replica-conn-pool-read-max-idletime duration maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) + --datastore-read-replica-conn-pool-read-max-lifetime duration maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) + --datastore-read-replica-conn-pool-read-max-lifetime-jitter duration waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection (default: 20% of max lifetime, 30m for CockroachDB) + --datastore-read-replica-conn-pool-read-max-open int number of concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-read-replica-conn-pool-read-min-open int number of minimum concurrent connections open in a remote datastore's connection pool (default 20) + --datastore-read-replica-conn-uri stringArray connection string used by remote datastores for read replicas (e.g. "postgres://postgres:password@localhost:5432/spicedb"). Only supported for postgres and mysql. + --datastore-read-replica-credentials-provider-name string retrieve datastore credentials dynamically using ("aws-iam") + --datastore-readonly set the service to read-only mode + --datastore-relationship-integrity-current-key-filename string current key filename for relationship integrity checks + --datastore-relationship-integrity-current-key-id string current key id for relationship integrity checks + --datastore-relationship-integrity-enabled enables relationship integrity checks. only supported on CRDB + --datastore-relationship-integrity-expired-keys stringArray config for expired keys for relationship integrity checks + --datastore-request-hedging enable request hedging + --datastore-request-hedging-initial-slow-value duration initial value to use for slow datastore requests, before statistics have been collected (default 10ms) + --datastore-request-hedging-max-requests uint maximum number of historical requests to consider (default 1000000) + --datastore-request-hedging-quantile float quantile of historical datastore request time over which a request will be considered slow (default 0.95) + --datastore-revision-quantization-interval duration boundary interval to which to round the quantized revision (default 5s) + --datastore-revision-quantization-max-staleness-percent float float percentage (where 1 = 100%) of the revision quantization interval where we may opt to select a stale revision for performance reasons. Defaults to 0.1 (representing 10%) (default 0.1) + --datastore-schema-watch-heartbeat duration heartbeat time on the schema watch in the datastore (if supported). 0 means to default to the datastore's minimum. (default 1s) + --datastore-spanner-credentials string path to service account key credentials file with access to the cloud spanner instance (omit to use application default credentials) + --datastore-spanner-emulator-host string URI of spanner emulator instance used for development and testing (e.g. localhost:9010) + --datastore-spanner-max-sessions uint maximum number of sessions across all Spanner gRPC connections the client can have at a given time (default 400) + --datastore-spanner-metrics string configure the metrics that are emitted by the Spanner datastore ("none", "native", "otel", "deprecated-prometheus") (default "otel") + --datastore-spanner-min-sessions uint minimum number of sessions across all Spanner gRPC connections the client can have at a given time (default 100) + --datastore-tx-overlap-key string static key to touch when writing to ensure transactions overlap (only used if --datastore-tx-overlap-strategy=static is set; cockroach driver only) (default "key") + --datastore-tx-overlap-strategy string strategy to generate transaction overlap keys ("request", "prefix", "static", "insecure") (cockroach driver only - see https://spicedb.dev/d/crdb-overlap for details)" (default "static") + --datastore-watch-buffer-length uint16 how large the watch buffer should be before blocking (default 1024) + --datastore-watch-buffer-write-timeout duration how long the watch buffer should queue before forcefully disconnecting the reader (default 1s) + --datastore-watch-connect-timeout duration how long the watch connection should wait before timing out (cockroachdb driver only) (default 1s) + --disable-version-response disables version response support in the API + --dispatch-cache-enabled enable caching (default true) + --dispatch-cache-max-cost string upper bound cache size in bytes or percent of available memory (default "30%") + --dispatch-cache-metrics enable cache metrics (default true) + --dispatch-cache-num-counters int number of TinyLFU samples to track (default 10000) + --dispatch-check-permission-concurrency-limit uint16 maximum number of parallel goroutines to create for each check request or subrequest. defaults to --dispatch-concurrency-limit + --dispatch-chunk-size uint16 maximum number of object IDs in a dispatched request (default 100) + --dispatch-cluster-addr string address to listen on to serve dispatch (default ":50053") + --dispatch-cluster-cache-enabled enable caching (default true) + --dispatch-cluster-cache-max-cost string upper bound cache size in bytes or percent of available memory (default "70%") + --dispatch-cluster-cache-metrics enable cache metrics (default true) + --dispatch-cluster-cache-num-counters int number of TinyLFU samples to track (default 100000) + --dispatch-cluster-enabled enable dispatch gRPC server + --dispatch-cluster-max-conn-age duration how long a connection serving dispatch should be able to live (default 30s) + --dispatch-cluster-max-workers uint32 set the number of workers for this server (0 value means 1 worker per request) + --dispatch-cluster-network string network type to serve dispatch ("tcp", "tcp4", "tcp6", "unix", "unixpacket") (default "tcp") + --dispatch-cluster-tls-cert-path string local path to the TLS certificate used to serve dispatch + --dispatch-cluster-tls-key-path string local path to the TLS key used to serve dispatch + --dispatch-concurrency-limit uint16 maximum number of parallel goroutines to create for each request or subrequest (default 50) + --dispatch-hashring-replication-factor uint16 set the replication factor of the consistent hasher used for the dispatcher (default 100) + --dispatch-hashring-spread uint8 set the spread of the consistent hasher used for the dispatcher (default 1) + --dispatch-lookup-resources-concurrency-limit uint16 maximum number of parallel goroutines to create for each lookup resources request or subrequest. defaults to --dispatch-concurrency-limit + --dispatch-lookup-subjects-concurrency-limit uint16 maximum number of parallel goroutines to create for each lookup subjects request or subrequest. defaults to --dispatch-concurrency-limit + --dispatch-max-depth uint32 maximum recursion depth for nested calls (default 50) + --dispatch-reachable-resources-concurrency-limit uint16 maximum number of parallel goroutines to create for each reachable resources request or subrequest. defaults to --dispatch-concurrency-limit + --dispatch-upstream-addr string upstream grpc address to dispatch to + --dispatch-upstream-ca-path string local path to the TLS CA used when connecting to the dispatch cluster + --dispatch-upstream-timeout duration maximum duration of a dispatch call an upstream cluster before it times out (default 1m0s) + --enable-experimental-watchable-schema-cache enables the experimental schema cache which makes use of the Watch API for automatic updates + --enable-performance-insight-metrics enables performance insight metrics, which are used to track the latency of API calls by shape + --enable-revision-heartbeat enables support for revision heartbeat, used to create a synthetic revision on an interval defined by the quantization window (postgres only) (default true) + --experimental-dispatch-secondary-maximum-primary-hedging-delays stringToString maximum number of hedging delays to use for each request type to delay the primary request. default is 5ms (default []) + --experimental-dispatch-secondary-upstream-addrs stringToString secondary upstream addresses for dispatches, each with a name (default []) + --experimental-dispatch-secondary-upstream-exprs stringToString map from request type to its associated CEL expression, which returns the secondary upstream(s) to be used for the request (default []) + --grpc-addr string address to listen on to serve gRPC (default ":50051") + --grpc-enabled enable gRPC gRPC server (default true) + --grpc-log-requests-enabled logs API request payloads + --grpc-log-responses-enabled logs API response payloads + --grpc-max-conn-age duration how long a connection serving gRPC should be able to live (default 30s) + --grpc-max-workers uint32 set the number of workers for this server (0 value means 1 worker per request) + --grpc-network string network type to serve gRPC ("tcp", "tcp4", "tcp6", "unix", "unixpacket") (default "tcp") + --grpc-preshared-key strings preshared key(s) to require for authenticated requests + --grpc-shutdown-grace-period duration amount of time after receiving sigint to continue serving + --grpc-tls-cert-path string local path to the TLS certificate used to serve gRPC + --grpc-tls-key-path string local path to the TLS key used to serve gRPC + --http-addr string address to listen on to serve gateway (default ":8443") + --http-enabled enable http gateway server + --http-tls-cert-path string local path to the TLS certificate used to serve gateway + --http-tls-key-path string local path to the TLS key used to serve gateway + --max-bulk-export-relationships-limit uint32 maximum number of relationships that can be exported in a single request (default 10000) + --max-caveat-context-size int maximum allowed size of request caveat context in bytes. A value of zero or less means no limit (default 4096) + --max-datastore-read-page-size uint limit on the maximum page size that we will load into memory from the datastore at one time (default 1000) + --max-delete-relationships-limit uint32 maximum number of relationships that can be deleted in a single request (default 1000) + --max-lookup-resources-limit uint32 maximum number of resources that can be looked up in a single request (default 1000) + --max-read-relationships-limit uint32 maximum number of relationships that can be read in a single request (default 1000) + --max-relationship-context-size int maximum allowed size of the context to be stored in a relationship (default 25000) + --metrics-addr string address to listen on to serve metrics (default ":9090") + --metrics-enabled enable http metrics server (default true) + --metrics-tls-cert-path string local path to the TLS certificate used to serve metrics + --metrics-tls-key-path string local path to the TLS key used to serve metrics + --ns-cache-enabled enable caching (default true) + --ns-cache-max-cost string upper bound cache size in bytes or percent of available memory (default "32MiB") + --ns-cache-metrics enable cache metrics (default true) + --ns-cache-num-counters int number of TinyLFU samples to track (default 1000) + --otel-endpoint string OpenTelemetry collector endpoint - the endpoint can also be set by using enviroment variables + --otel-insecure connect to the OpenTelemetry collector in plaintext + --otel-provider string OpenTelemetry provider for tracing ("none", "otlphttp", "otlpgrpc") (default "none") + --otel-sample-ratio float ratio of traces that are sampled (default 0.01) + --otel-service-name string service name for trace data (default "spicedb") + --otel-trace-propagator string OpenTelemetry trace propagation format ("b3", "w3c", "ottrace"). Add multiple propagators separated by comma. (default "w3c") + --pprof-block-profile-rate int sets the block profile sampling rate + --pprof-mutex-profile-rate int sets the mutex profile sampling rate + --schema-prefixes-required require prefixes on all object definitions in schemas + --streaming-api-response-delay-timeout duration max duration time elapsed between messages sent by the server-side to the client (responses) before the stream times out (default 30s) + --telemetry-ca-override-path string path to a custom CA to use with the telemetry endpoint + --telemetry-endpoint string endpoint to which telemetry is reported, empty string to disable (default "https://telemetry.authzed.com") + --telemetry-interval duration approximate period between telemetry reports, minimum 1 minute (default 1h0m0s) + --termination-log-path string define the path to the termination log file, which contains a JSON payload to surface as reason for termination - disabled by default + --update-relationships-max-preconditions-per-call uint16 maximum number of preconditions allowed for WriteRelationships and DeleteRelationships calls (default 1000) + --watch-api-heartbeat duration heartbeat time on the watch in the API. 0 means to default to the datastore's minimum. (default 1s) + --write-relationships-max-updates-per-call uint16 maximum number of updates allowed for WriteRelationships calls (default 1000) +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +## Reference: `spicedb serve-testing` + +An in-memory spicedb server which serves completely isolated datastores per client-supplied auth token used. + +``` +spicedb serve-testing [flags] +``` + +### Options + +``` + --grpc-addr string address to listen on to serve gRPC (default ":50051") + --grpc-enabled enable gRPC gRPC server (default true) + --grpc-max-conn-age duration how long a connection serving gRPC should be able to live (default 30s) + --grpc-max-workers uint32 set the number of workers for this server (0 value means 1 worker per request) + --grpc-network string network type to serve gRPC ("tcp", "tcp4", "tcp6", "unix", "unixpacket") (default "tcp") + --grpc-tls-cert-path string local path to the TLS certificate used to serve gRPC + --grpc-tls-key-path string local path to the TLS key used to serve gRPC + --http-addr string address to listen on to serve http (default ":8443") + --http-enabled enable http http server + --http-tls-cert-path string local path to the TLS certificate used to serve http + --http-tls-key-path string local path to the TLS key used to serve http + --load-configs strings configuration yaml files to load + --max-bulk-export-relationships-limit uint32 maximum number of relationships that can be exported in a single request (default 10000) + --max-caveat-context-size int maximum allowed size of request caveat context in bytes. A value of zero or less means no limit (default 4096) + --max-delete-relationships-limit uint32 maximum number of relationships that can be deleted in a single request (default 1000) + --max-lookup-resources-limit uint32 maximum number of resources that can be looked up in a single request (default 1000) + --max-read-relationships-limit uint32 maximum number of relationships that can be read in a single request (default 1000) + --max-relationship-context-size int maximum allowed size of the context to be stored in a relationship (default 25000) + --otel-endpoint string OpenTelemetry collector endpoint - the endpoint can also be set by using enviroment variables + --otel-insecure connect to the OpenTelemetry collector in plaintext + --otel-provider string OpenTelemetry provider for tracing ("none", "otlphttp", "otlpgrpc") (default "none") + --otel-sample-ratio float ratio of traces that are sampled (default 0.01) + --otel-service-name string service name for trace data (default "spicedb") + --otel-trace-propagator string OpenTelemetry trace propagation format ("b3", "w3c", "ottrace"). Add multiple propagators separated by comma. (default "w3c") + --pprof-block-profile-rate int sets the block profile sampling rate + --pprof-mutex-profile-rate int sets the mutex profile sampling rate + --readonly-grpc-addr string address to listen on to serve read-only gRPC (default ":50052") + --readonly-grpc-enabled enable read-only gRPC gRPC server (default true) + --readonly-grpc-max-conn-age duration how long a connection serving read-only gRPC should be able to live (default 30s) + --readonly-grpc-max-workers uint32 set the number of workers for this server (0 value means 1 worker per request) + --readonly-grpc-network string network type to serve read-only gRPC ("tcp", "tcp4", "tcp6", "unix", "unixpacket") (default "tcp") + --readonly-grpc-tls-cert-path string local path to the TLS certificate used to serve read-only gRPC + --readonly-grpc-tls-key-path string local path to the TLS key used to serve read-only gRPC + --readonly-http-addr string address to listen on to serve read-only HTTP (default ":8444") + --readonly-http-enabled enable http read-only HTTP server + --readonly-http-tls-cert-path string local path to the TLS certificate used to serve read-only HTTP + --readonly-http-tls-key-path string local path to the TLS key used to serve read-only HTTP + --termination-log-path string define the path to the termination log file, which contains a JSON payload to surface as reason for termination - disabled by default + --update-relationships-max-preconditions-per-call uint16 maximum number of preconditions allowed for WriteRelationships and DeleteRelationships calls (default 1000) + --write-relationships-max-updates-per-call uint16 maximum number of updates allowed for WriteRelationships calls (default 1000) +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` + +## Reference: `spicedb version` + +displays the version of SpiceDB + +``` +spicedb version [flags] +``` + +### Options + +``` + --include-deps include dependencies' versions +``` + +### Options Inherited From Parent Flags + +``` + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases +``` diff --git a/content/spicedb/concepts/consistency.mdx b/content/spicedb/concepts/consistency.mdx new file mode 100644 index 0000000..d733d61 --- /dev/null +++ b/content/spicedb/concepts/consistency.mdx @@ -0,0 +1,177 @@ +import YouTube from "react-youtube"; +import { Callout } from "nextra/components"; + +# Consistency + +Consistency is a fundamental concept in distributed systems, databases, and authorization. + +SpiceDB is no exception! +In fact, the paper that inspired SpiceDB is entitled "Zanzibar: Google's \***\*Consistent\*\***, Global Authorization System". + +## What is consistency? + +The following presentation submitted to the [CNCF] gives an overview and examples of consistency and why it's important: + +
+ + +[CNCF]: https://cncf.io + +## Consistency in SpiceDB + +In SpiceDB, there is a requirement for both proper consistency, as well as excellent performance. + +To achieve performance, SpiceDB implements a number of levels of caching, to ensure that repeated permissions checks do not need to be recomputed over and over again, so long as the underlying relationships behind those permissions have not changed. + +However, all caches suffer from the risk of becoming stale. +If a relationship has changed, and all the caches have not been updated or cleared, there is a risk of returning incorrect permission information; this problem is known as the [New Enemy Problem]. + +SpiceDB's solution is to leverage the [v1 API][v1-api]'s ability to specify the desired [consistency][consistency] level on a per-request basis by using [ZedTokens](#zedtokens). +This allows for the API consumers dynamically trade-off less fresh data for more performance when possible. + +Consistency is provided via the [Consistency message][msg] on supported API calls. + +[v1-api]: https://buf.build/authzed/api/tree/main/authzed/api/v1 +[consistency]: https://en.wikipedia.org/wiki/Data_consistency +[msg]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.Consistency + +### Defaults + +| API | Default Consistency | +| --------------------- | ------------------- | +| `WriteRelationships` | `fully_consistent` | +| `DeleteRelationships` | `fully_consistent` | +| `ReadSchema` | `fully_consistent` | +| `WriteSchema` | `fully_consistent` | +| All other APIs | `minimize_latency` | + +### Levels + +#### Minimize Latency + +`minimize_latency` will attempt to minimize the latency of the API call by selecting data that is most likely to exist in the cache. + + + If used exclusively, this can lead to a window of time where the [New Enemy + Problem] can occur. + + +[New Enemy Problem]: ./zanzibar#new-enemy-problem + +```proto +Consistency { minimize_latency: true } +``` + +#### At Least As Fresh + +`at_least_as_fresh` will ensure that all data used for computing the response is at least as fresh as the point-in-time specified in the [ZedToken]. + +If newer information is available, it will be used. + +```proto +Consistency { at_least_as_fresh: ZedToken { token: "..." } } +``` + +[ZedToken]: #zedtokens + +#### At Exact Snapshot + +`at_exact_snapshot` will ensure that all data used for computing the response is that found at the _exact_ point-in-time specified in the [ZedToken]. + + + Requests specifying `at_exact_snapshot` can fail with a _Snapshot Expired_ error because SpiceDB eventually collects garbage over time. + +It is recommended to only use this option if you are paginating over results within a short window. +This window is determined by the `--datastore-gc-window` flag. + + + +```proto +Consistency { at_exact_snapshot: ZedToken { token: "..." } } +``` + +[ZedToken]: #zedtokens + +#### Fully Consistent + +`fully_consistent` will ensure that all data used is fully consistent with the latest data available within the SpiceDB datastore. + +Note that the snapshot used will be loaded at the beginning of the API call, and that new data written _after_ the API starts executing will be ignored. + + + This consistency mode explicitly bypasses caching, dramatically impacting latency. + +If you need read-after-write consistency, consider using a [ZedToken] + +[ZedToken]: #zedtokens + + + +```proto +Consistency { fully_consistent: true } +``` + +## ZedTokens + +A ZedToken is an opaque token representing a point-in-time of the SpiceDB datastore, encoded for easy storage and transmission. +ZedTokens are used for data consistency guarantees when using the SpiceDB API. + +ZedToken is the SpiceDB equivalent of Google Zanzibar's [Zookie] concept which protects users from the [New Enemy Problem]. + +SpiceDB returns ZedTokens from the APIs that perform permission checks or modify data: + +- [CheckPermission] +- [BulkCheckPermission] +- [WriteRelationships] +- [DeleteRelationships] + +[Zookie]: https://authzed.com/zanzibar/2Dv_Aat_2Q:0.Py6NWBPg8:2U +[New Enemy Problem]: zanzibar#new-enemy-problem +[CheckPermission]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.CheckPermission +[BulkCheckPermission]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExperimentalService.BulkCheckPermission +[WriteRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.WriteRelationships +[DeleteRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.DeleteRelationships + +### Storing ZedTokens + +There are scenarios where it makes sense to store ZedTokens in an application's primary database. +The goal of this workflow is to ensure the application can query SpiceDB with consistency that is causally tied to the content of the protected resource. + +Stored ZedTokens should be updated under these events: + +- The resource is created or deleted +- The contents of the resource change +- Adding or removing access to the resource (e.g. writing a relationship) + +When these events happen, a new ZedToken is either returned or it should be requested by performing a check with full consistency. +The result should be stored alongside the newly updated content in the application database. + +For a Postgres table this can be a standard `text` column. +If a fixed-width column is preferred, we recommend `varchar(1024)`. + + + For this purpose, Google's Zanzibar has a [ContentChangeCheck API][ccc-api] because it doesn't support configurable consistency like SpiceDB. + +[ccc-api]: https://authzed.com/zanzibar#annotations/intro/content-change-check + + + +Data can be complex and designed with hierarchies in mind. +In these scenarios, the parent resource must be referenced. + +A simpler alternative is to perform read-after-write queries to SpiceDB with full consistency. +This is great for experimentation and getting started, but might not be ideal for production workloads. + +### Ignoring ZedTokens + +Some workloads and domains might not be sensitive to wall-clock-based permission races. +In those scenarios, you're free to totally ignore ZedTokens. + +You can configure the staleness window to a tolerable duration for your domain with the `spicedb serve --datastore-revision-quantization-interval` flag. + +## Further Reading + +- [Zed Tokens, Zookies, Consistency for Authorization](https://authzed.com/blog/zedtokens) +- [Consistency is the Key to Performance and Safety](https://authzed.com/blog/consistency-is-the-key-to-performance-and-safety) +- [Hotspot Caching in Google Zanzibar and SpiceDB](https://authzed.com/blog/hotspot-caching-in-google-zanzibar-and-spicedb) +- [The One Crucial Difference Between Spanner and CockroachDB](https://authzed.com/blog/prevent-newenemy-cockroachdb) diff --git a/content/spicedb/concepts/datastore-migrations.mdx b/content/spicedb/concepts/datastore-migrations.mdx new file mode 100644 index 0000000..7216555 --- /dev/null +++ b/content/spicedb/concepts/datastore-migrations.mdx @@ -0,0 +1,127 @@ +import { Callout } from "nextra/components"; + +# Datastore Migrations + +Like all actively developed software, SpiceDB has new versions of the software released on a regular cadence. +Updates are published to the [SpiceDB GitHub releases page] and announced via [Twitter] and [Discord]. + +Transitioning between versions is often as simple as executing a new binary or container, but there are times when updates are more complex. +For example, releases that include changes to datastore can require that users update to specific versions and perform a series of actions in order to update while avoiding any downtime. + + + This page explains migrating the schema of a datastore underlying SpiceDB. If + you need information about migrating between SpiceDB instances, go + [here](/spicedb/ops/data/migrations). If you need information about making + changes to a SpiceDB schema that result in a migration, go + [here](/spicedb/modeling/migrating-schema). + + + + SpiceDB strives to main compatibility across each version and its following minor version. + +You should refer to the Upgrade Notes section of each release to find instructions for updating to avoid downtime. + +To automate upgrades without downtime, consider deploying the [SpiceDB Operator]. + +[SpiceDB Operator]: /spicedb/ops/operator + + + +[SpiceDB GitHub releases page]: https://github.com/authzed/spicedb/releases +[Twitter]: https://twitter.com/authzed +[Discord]: https://authzed.com/discord +[datastore migrations]: #what-are-migrations + +## Migrations + +### What are migrations? + +Before a datastore can be used by SpiceDB or before running a new version of SpiceDB, you must execute all available migrations. + + + The only exception is the [memdb datastore](datastores#memdb) because it does + not persist any data. + + +In order to migrate a datastore, run the following command with your desired values: + +``` +spicedb migrate head \ + --datastore-engine $DESIRED_ENGINE \ + --datastore-conn-uri $CONNECTION_STRING +``` + +For all software that maintains data, there can be updates to code that rely on also updating the data, too. +The process of updating data to use new versions of software is called _migrating_. + +SpiceDB users will see migrations crop up in two places: when they update versions of SpiceDB and when they write backwards incompatible changes to their schema. +This document's focus is on the former; the latter is documented [here]. + +[here]: ../modeling/migrating-schema + +### SpiceDB migration tooling + +SpiceDB ships with a migration command: `spicedb migrate`. +This command powers all of Authzed products' zero down-time migrations, so it's guaranteed to be battle-tested and supportable. +And while you are free to explore other tools to help you migrate, we cannot recommend them. + +If you do not care about causing downtime or you are bringing up a new cluster, you can always run the following command to migrate: + +``` +spicedb migrate head +``` + +In most cases, this command will not actually cause downtime, but one should confirm that's the case before executing on production environments with uptime requirements. + +## DB Migration Compatibility + +On startup, SpiceDB checks to see whether its desired DB migration tag matches the DB migration tag held in the database. +If they differ, the instance will error out, which prevents an instance from coming up when it can't run against the current datastore DDL. + +SpiceDB only performs this check on startup, which means that a new DB migration won't cause existing SpiceDB instances to break (unless they restart for other reasons), +provided that the DDL is compatible with the existing instance. + +The information in the Upgrade Notes of a given release can help determine whether the DDL will be compatible between two versions. +Using the SpiceDB Operator is the easiest way to ensure that all DB migrations are compatible and applied correctly. + +### Overriding Migration Compatibility + +Under some circumstances, you may want to run a version of SpiceDB against a DB migration other than the one it's expecting. +If you know that the version of SpiceDB you want to run is compatible with the DB migration, you can use the `--datastore-allowed-migrations` +flag on `spicedb serve` to provide a list of compatible DB migrations: + +``` +spicedb serve <...> --datastore-allowed-migrations add-expiration-support --datastore-allowed-migrations add-transaction-metadata-table +``` + +More information on the use case/motivation is available in [this issue](https://github.com/authzed/spicedb/issues/2135). + +## Recommendations + +### Managed Service + +Rather than handling updates yourself, SpiceDB is offered as a [managed service]. + +- AuthZed Dedicated users can select the desired version of SpiceDB + +No matter which service you select, zero-downtime migrations are always performed. + +[managed service]: https://authzed.com/pricing + +### Operator + +If you are operating SpiceDB yourself, the recommended update workflow is to use the [SpiceDB Operator]. +Please see the [operator-specific update docs] for various update options. + +[operator-specific update docs]: /spicedb/ops/operator#updating-managed-spicedbclusters + +### Sequential Updates + +We highly recommend updating sequentially through SpiceDB minor versions (e.g. 1.0.0 -> 1.1.0) to avoid headaches. +Jumping many versions at once might cause you to miss instructions for a particular release that could lead to downtime. +The [SpiceDB Operator](#operator) automates this process. + +### Rolling Deployments + +We highly recommend a deployment orchestrator to help coordinate rolling out new instances of SpiceDB without dropping traffic. +SpiceDB has been developed with an eye towards running on Kubernetes, but other platforms should be well supported. diff --git a/content/spicedb/concepts/datastores.mdx b/content/spicedb/concepts/datastores.mdx new file mode 100644 index 0000000..d3f5455 --- /dev/null +++ b/content/spicedb/concepts/datastores.mdx @@ -0,0 +1,435 @@ +import { Callout } from "nextra/components"; + +# Datastores + +In order to reduce operational complexity, SpiceDB leverages existing, popular systems for persisting data. + +AuthZed has standardized our managed services on CockroachDB, but we give self-hosted customers the option to pick the datastore that best suits their operational requirements. + +- [CockroachDB](#cockroachdb) - Recommended for self hosted deployments with high throughput and/or multi-region requirements +- [Cloud Spanner](#cloud-spanner) - Recommended for self-hosted Google Cloud deployments +- [PostgreSQL](#postgresql) - Recommended for self-hosted single-region deployments +- [MySQL](#mysql) - Not recommended; only use if you cannot use PostgreSQL +- [memdb](#memdb) - Recommended for local development and integration testing against applications + +## CockroachDB + +### Usage Notes + + + SpiceDB's Watch API requires CockroachDB's [Experimental Changefeed] to be enabled. + +[Experimental Changefeed]: https://www.cockroachlabs.com/docs/v22.1/changefeed-for + + + +- Recommended for multi-region deployments, with configurable region awareness +- Enables horizontal scalability by adding more SpiceDB and CockroachDB instances +- Resiliency to individual CockroachDB instance failures +- Query and data balanced across the CockroachDB +- Setup and operational complexity of running CockroachDB + +### Developer Notes + +- Code can be found [here][crdb-code] +- Documentation can be found [here][crdb-godoc] +- Implemented using [pgx][pgx] for a SQL driver and connection pooling +- Has a native changefeed +- Stores migration revisions using the same strategy as [Alembic][alembic] + +[crdb-code]: https://github.com/authzed/spicedb/tree/main/internal/datastore/crdb +[crdb-godoc]: https://pkg.go.dev/github.com/authzed/spicedb/internal/datastore/crdb +[pgx]: https://pkg.go.dev/gopkg.in/jackc/pgx.v3 +[alembic]: https://alembic.sqlalchemy.org/en/latest/ + +### Configuration + +#### Required Parameters + +| Parameter | Description | Example | +| -------------------- | ----------------------------------------- | ----------------------------------------------------------------------------------------- | +| `datastore-engine` | the datastore engine | `--datastore-engine=cockroachdb` | +| `datastore-conn-uri` | connection string used to connect to CRDB | `--datastore-conn-uri="postgres://user:password@localhost:26257/spicedb?sslmode=disable"` | + +#### Optional Parameters + +| Parameter | Description | Example | +| ------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | --------------------------------------------------------------- | +| `datastore-max-tx-retries` | Maximum number of times to retry a query before raising an error | `--datastore-max-tx-retries=50` | +| `datastore-tx-overlap-strategy` | The overlap strategy to prevent New Enemy on CRDB (see below) | `--datastore-tx-overlap-strategy=static` | +| `datastore-tx-overlap-key` | The key to use for the overlap strategy (see below) | `--datastore-tx-overlap-key="foo"` | +| `datastore-conn-pool-read-max-idletime` | Maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-read-max-idletime=30m0s` | +| `datastore-conn-pool-read-max-lifetime` | Maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-read-max-lifetime=30m0s` | +| `datastore-conn-pool-read-max-lifetime-jitter` | Waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection | `--datastore-conn-pool-read-max-lifetime-jitter=6m` | +| `datastore-conn-pool-read-max-open` | Number of concurrent connections open in a remote datastore's connection pool (default 20) | `--datastore-conn-pool-read-max-open=20` | +| `datastore-conn-pool-read-min-open` | Number of minimum concurrent connections open in a remote datastore's connection pool (default 20) | `--datastore-conn-pool-read-min-open=20` | +| `datastore-conn-pool-write-healthcheck-interval` | Amount of time between connection health checks in a remote datastore's connection pool (default 30s) | `--datastore-conn-pool-write-healthcheck-interval=30s` | +| `datastore-conn-pool-write-max-idletime` | Maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-write-max-idletime=30m0s` | +| `datastore-conn-pool-write-max-lifetime` | Maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-write-max-lifetime=30m0s` | +| `datastore-conn-pool-write-max-lifetime-jitter` | Waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection | `--datastore-conn-pool-write-max-lifetime-jitter=6m` | +| `datastore-conn-pool-write-max-open` | Number of concurrent connections open in a remote datastore's connection pool (default 10) | `--datastore-conn-pool-write-max-open=10` | +| `datastore-conn-pool-write-min-open` | Number of minimum concurrent connections open in a remote datastore's connection pool (default 10) | `--datastore-conn-pool-write-min-open=10` | +| `datastore-query-split-size` | The (estimated) query size at which to split a query into multiple queries | `--datastore-query-split-size=5kb` | +| `datastore-gc-window` | Sets the window outside of which overwritten relationships are no longer accessible | `--datastore-gc-window=1s` | +| `datastore-revision-fuzzing-duration` | Sets a fuzzing window on all zookies/zedtokens | `--datastore-revision-fuzzing-duration=50ms` | +| `datastore-readonly` | Places the datastore into readonly mode | `--datastore-readonly=true` | +| `datastore-follower-read-delay-duration` | Amount of time to subtract from non-sync revision timestamps to ensure follower reads | `--datastore-follower-read-delay-duration=4.8s` | +| `datastore-relationship-integrity-enabled` | Enables relationship integrity checks, only supported on CRDB | `--datastore-relationship-integrity-enabled=false` | +| `datastore-relationship-integrity-current-key-id` | Current key id for relationship integrity checks | `--datastore-relationship-integrity-current-key-id="foo"` | +| `datastore-relationship-integrity-current-key-filename` | Current key filename for relationship integrity checks | `--datastore-relationship-integrity-current-key-filename="foo"` | +| `datastore-relationship-integrity-expired-keys` | Config for expired keys for relationship integrity checks | `--datastore-relationship-integrity-expired-keys="foo"` | + +#### Overlap Strategy + +In distributed systems, you can trade-off consistency for performance. + +CockroachDB datastore users that are willing to rely on more subtle guarantees to mitigate the [New Enemy Problem] can configure `--datastore-tx-overlap-strategy`. + +[New Enemy Problem]: /spicedb/concepts/zanzibar#new-enemy-problem + +The available strategies are: + +| Strategy | Description | +| ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `static` (default) | All writes overlap to guarantee safety at the cost of write throughput | +| `prefix` | Only writes that contain objects with same prefix overlap (e.g. `tenant1/user` and `tenant2/user` can be written in concurrently) | +| `request` | Only writes with the same `io.spicedb.requestoverlapkey` header overlap enabling applications to decide on-the-fly which writes have causal dependencies. Writes without any header act the same as `insecure`. | +| `insecure` | No writes overlap, providing the best write throughput, but possibly leaving you vulnerable to the [New Enemy Problem] | + +For more information, refer to the [CockroachDB datastore README][crdb-readme] or our blog post "[The One Crucial Difference Between Spanner and CockroachDB][crdb-blog]". + +[crdb-readme]: https://github.com/authzed/spicedb/blob/main/internal/datastore/crdb/README.md +[crdb-blog]: https://authzed.com/blog/prevent-newenemy-cockroachdb + +#### Garbage Collection Window + + + As of February 2023, the [default garbage collection window] has changed to `1.25 hours` for CockroachDB Serverless and `4 hours` for CockroachDB Dedicated. + +[default garbage collection window]: https://github.com/cockroachdb/cockroach/issues/89233 + + + +SpiceDB warns if the garbage collection window as configured in CockroachDB is smaller than the SpiceDB configuration. + +If you need a longer time window for the Watch API or querying at exact snapshots, you can [adjust the value in CockroachDB][crdb-gc]: + +```sql +ALTER ZONE default CONFIGURE ZONE USING gc.ttlseconds = 90000; +``` + +[crdb-gc]: https://www.cockroachlabs.com/docs/stable/configure-replication-zones.html#replication-zone-variables + +#### Relationship Integrity + +Relationship Integrity is a new experimental feature in SpiceDB that ensures that data written into the supported backing datastores (currently: only CockroachDB) is validated as having been written by SpiceDB itself. + +- **What does relationship integrity ensure?** + Relationship integrity primarily ensures that all relationships written into the backing datastore were written via a trusted instance of SpiceDB or that the caller has access to the key(s) necessary to write those relationships. + It ensures that if someone gains access to the underlying datastore, they cannot simply write new relationships of their own invention. + +- **What does relationship integrity _not_ ensure?** + Since the relationship integrity feature signs each individual relationship, it does not ensure that removal of relationships is by a trusted party. + Schema is also currently unverified, so an untrusted party could change it as well. + Support for schema changes will likely come in a future version. + +##### Setting up relationship integrity + +To run with relationship integrity, new flags must be given to SpiceDB: + +```zed +spicedb serve ...existing flags... +--datastore-relationship-integrity-enabled +--datastore-relationship-integrity-current-key-id="somekeyid" +--datastore-relationship-integrity-current-key-filename="some.key" +``` + +Place the generated key contents (which must support an HMAC key) in `some.key` + +##### Deployment Process + +1. Start with a **clean** datastore for SpiceDB. **At this time, migrating an existing SpiceDB installation is not supported.** +2. Run the standard `migrate` command but with relationship integrity flags included. +3. Run SpiceDB with the relationship integrity flags included. + +## Cloud Spanner + +### Usage Notes + +- Requires a Google Cloud Account with an active Cloud Spanner instance +- Take advantage of Google's TrueTime. + The Spanner driver assumes the database is linearizable and skips the transaction overlap strategy required by CockroachDB. + +### Developer Notes + +- Code can be found [here][spanner-code] +- Documentation can be found [here][spanner-godoc] +- Starts a background [GC worker][gc-process] to clean up old entries from the manually-generated changelog table + +[spanner-code]: https://github.com/authzed/spicedb/tree/main/internal/datastore/spanner +[spanner-godoc]: https://pkg.go.dev/github.com/authzed/spicedb/internal/datastore/spanner +[gc-process]: https://github.com/authzed/spicedb/blob/main/internal/datastore/common/gc.go + +### Configuration + +- The [Cloud Spanner docs][spanner-docs] outline how to set up an instance +- Authentication via service accounts: The service account that runs migrations must have `Cloud Spanner Database Admin`; SpiceDB (non-migrations) must have `Cloud Spanner Database User`. + +[spanner-docs]: https://cloud.google.com/spanner + +#### Required Parameters + +| Parameter | Description | Example | +| -------------------- | ------------------------------------- | ---------------------------------------------------------------------------------------- | +| `datastore-engine` | the datastore engine | `--datastore-engine=spanner` | +| `datastore-conn-uri` | the cloud spanner database identifier | `--datastore-conn-uri="projects/project-id/instances/instance-id/databases/database-id"` | + +#### Optional Parameters + +| Parameter | Description | Example | +| ---------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | +| `datastore-spanner-credentials` | JSON service account token (omit to use [application default credentials](https://cloud.google.com/docs/authentication/production)) | `--datastore-spanner-credentials=./spanner.json` | +| `datastore-gc-interval` | Amount of time to wait between garbage collection passes | `--datastore-gc-interval=3m` | +| `datastore-gc-window` | Sets the window outside of which overwritten relationships are no longer accessible | `--datastore-gc-window=1s` | +| `datastore-revision-fuzzing-duration` | Sets a fuzzing window on all zookies/zedtokens | `--datastore-revision-fuzzing-duration=50ms` | +| `datastore-readonly` | Places the datastore into readonly mode | `--datastore-readonly=true` | +| `datastore-follower-read-delay-duration` | Amount of time to subtract from non-sync revision timestamps to ensure stale reads | `--datastore-follower-read-delay-duration=4.8s` | + +## PostgreSQL + +### Usage Notes + +- Recommended for single-region deployments +- Postgres 15 or newer is required for optimal performance +- Resiliency to failures only when PostgreSQL is operating with a follower and proper failover +- Carries setup and operational complexity of running PostgreSQL +- Does not rely on any non-standard PostgreSQL extensions +- Compatible with managed PostgreSQL services (e.g. AWS RDS) +- Can be scaled out on read workloads using read replicas + + + SpiceDB's Watch API requires PostgreSQL's [Commit Timestamp tracking][commit-ts] to be enabled. + +This can be done by providing the `--track_commit_timestamp=on` flag, configuring `postgresql.conf`, or executing `ALTER SYSTEM SET track_commit_timestamp = on;` and restarting the instance. + +[commit-ts]: https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-TRACK-COMMIT-TIMESTAMP + + + +### Developer Notes + +- Code can be found [here][pg-code] +- Documentation can be found [here][pg-godoc] +- Implemented using [pgx][pgx] for a SQL driver and connection pooling +- Stores migration revisions using the same strategy as [Alembic][alembic] +- Implements its own [MVCC][mvcc] model by storing its data with transaction IDs + +[pg-code]: https://github.com/authzed/spicedb/tree/main/internal/datastore/postgres +[pg-godoc]: https://pkg.go.dev/github.com/authzed/spicedb/internal/datastore/postgres +[mvcc]: https://en.wikipedia.org/wiki/Multiversion_concurrency_control + +### Read Replicas + +SpiceDB supports Postgres read replicas and does it while retaining consistency guarantees. +Typical use cases are: + +- scale read workloads/offload reads from the primary +- deploy SpiceDB in other regions with primarily read workloads + +Read replicas are typically configured with asynchronous replication, which involves replication lag. +That would be problematic to SpiceDB's ability to solve the new enemy problem but it addresses the challenge by checking if a revision has been replicated into the target replica. +If missing, it will fall back to the primary. + +All API consistency options will leverage replicas, but the ones that benefit the most are those that involve some level of staleness as it increases the odds a revision has replicated. +`minimize_latency`, `at_least_as_fresh`, and `at_exact_snapshot` consistency modes have the highest chance of being redirected to a replica. + +SpiceDB supports Postgres replicas behind a load-balancer, and/or individually listing replica hosts. +When multiple URIs are provided, they will be queried using a round-robin strategy. +Please note that the maximum number of replica URIs to list is 16. + +Read replicas are configured with the `--datastore-read-replica-*` family of flags. + +SpiceDB supports [PgBouncer](https://www.pgbouncer.org/) connection pooler and is part of the test suite. + +#### Transaction IDs and MVCC + +The Postgres implementation of SpiceDB's internal MVCC mechanism involves storing the internal transaction ID count associated with a given transaction +in the rows written in that transaction. +Because this counter is instance-specific, there are ways in which the data in the datastore can become desynced with that internal counter. +Two concrete examples are the use of `pg_dump` and `pg_restore` to transfer data between an old instance and a new instance and setting up +logical replication between a previously-existing instance and a newly-created instance. + +If you encounter this, SpiceDB can behave as though there is no schema written, because the data (including the schema) is associated with a future transaction ID and therefore isn't "visible" to SpiceDB. +If you run into this issue, the fix is [documented here](https://authzed.com/docs/spicedb/concepts/commands#reference-spicedb-datastore-repair) + +### Configuration + +#### Required Parameters + +| Parameter | Description | Example | +| -------------------- | ----------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `datastore-engine` | the datastore engine | `--datastore-engine=postgres` | +| `datastore-conn-uri` | connection string used to connect to PostgreSQL | `--datastore-conn-uri="postgres://postgres:password@localhost:5432/spicedb?sslmode=disable"` | + +#### Optional Parameters + +| Parameter | Description | Example | +| ------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | +| `datastore-conn-pool-read-max-idletime` | Maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-read-max-idletime=30m0s` | +| `datastore-conn-pool-read-max-lifetime` | Maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-read-max-lifetime=30m0s` | +| `datastore-conn-pool-read-max-lifetime-jitter` | Waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection | `--datastore-conn-pool-read-max-lifetime-jitter=6m` | +| `datastore-conn-pool-read-max-open` | Number of concurrent connections open in a remote datastore's connection pool (default 20) | `--datastore-conn-pool-read-max-open=20` | +| `datastore-conn-pool-read-min-open` | Number of minimum concurrent connections open in a remote datastore's connection pool (default 20) | `--datastore-conn-pool-read-min-open=20` | +| `datastore-conn-pool-write-healthcheck-interval` | Amount of time between connection health checks in a remote datastore's connection pool (default 30s) | `--datastore-conn-pool-write-healthcheck-interval=30s` | +| `datastore-conn-pool-write-max-idletime` | Maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-write-max-idletime=30m0s` | +| `datastore-conn-pool-write-max-lifetime` | Maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-write-max-lifetime=30m0s` | +| `datastore-conn-pool-write-max-lifetime-jitter` | Waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection | `--datastore-conn-pool-write-max-lifetime-jitter=6m` | +| `datastore-conn-pool-write-max-open` | Number of concurrent connections open in a remote datastore's connection pool (default 10) | `--datastore-conn-pool-write-max-open=10` | +| `datastore-conn-pool-write-min-open` | Number of minimum concurrent connections open in a remote datastore's connection pool (default 10) | `--datastore-conn-pool-write-min-open=10` | +| `datastore-query-split-size` | The (estimated) query size at which to split a query into multiple queries | `--datastore-query-split-size=5kb` | +| `datastore-gc-window` | Sets the window outside of which overwritten relationships are no longer accessible | `--datastore-gc-window=1s` | +| `datastore-revision-fuzzing-duration` | Sets a fuzzing window on all zookies/zedtokens | `--datastore-revision-fuzzing-duration=50ms` | +| `datastore-readonly` | Places the datastore into readonly mode | `--datastore-readonly=true` | +| `datastore-read-replica-conn-uri` | Connection string used by datastores for read replicas; only supported for postgres and MySQL | `--datastore-read-replica-conn-uri="postgres://postgres:password@localhost:5432/spicedb\"` | +| `datastore-read-replica-credentials-provider-name` | Retrieve datastore credentials dynamically using aws-iam | | +| `datastore-read-replica-conn-pool-read-healthcheck-interval` | amount of time between connection health checks in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-healthcheck-interval=30s` | +| `datastore-read-replica-conn-pool-read-max-idletime` | maximum amount of time a connection can idle in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-max-idletime=30m` | +| `datastore-read-replica-conn-pool-read-max-lifetime` | maximum amount of time a connection can live in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-max-lifetime=30m` | +| `datastore-read-replica-conn-pool-read-max-lifetime-jitter` | waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection to a read replica(default: 20% of max lifetime) | `--datastore-read-replica-conn-pool-read-max-lifetime-jitter=6m` | +| `datastore-read-replica-conn-pool-read-max-open` | number of concurrent connections open in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-max-open=20` | +| `datastore-read-replica-conn-pool-read-min-open` | number of minimum concurrent connections open in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-min-open=20` | + +## MySQL + +### Usage Notes + +- Recommended for single-region deployments +- Setup and operational complexity of running MySQL +- Does not rely on any non-standard MySQL extensions +- Compatible with managed MySQL services +- Can be scaled out on read workloads using read replicas + +### Developer Notes + +- Code can be found [here][mysql-code] +- Documentation can be found [here][mysql-godoc] +- Implemented using [Go-MySQL-Driver][go-mysql-driver] for a SQL driver +- Query optimizations are documented [here][mysql-executor] +- Implements its own [MVCC][mysql-mvcc] model by storing its data with transaction IDs + +[mysql-code]: https://github.com/authzed/spicedb/tree/main/internal/datastore/mysql +[mysql-godoc]: https://pkg.go.dev/github.com/authzed/spicedb/internal/datastore/mysql +[go-mysql-driver]: https://github.com/go-sql-driver/mysql +[mysql-executor]: https://github.com/authzed/spicedb/blob/main/internal/datastore/mysql/datastore.go#L317 +[mysql-mvcc]: https://en.wikipedia.org/wiki/Multiversion_concurrency_control + +### Read Replicas + + + Do not use a load balancer between SpiceDB and MySQL replicas because SpiceDB + will not be able to maintain consistency guarantees. + + +SpiceDB supports MySQL read replicas and does it while retaining consistency guarantees. +Typical use cases are: + +- scale read workloads/offload reads from the primary +- deploy SpiceDB in other regions with primarily read workloads + +Read replicas are typically configured with asynchronous replication, which involves replication lag. +That would be problematic to SpiceDB's ability to solve the new enemy problem but it addresses the challenge by checking if the revision has been replicated into the target replica. +If missing, it will fall back to the primary. +Compared to the Postgres implementation, MySQL support requires two roundtrips instead of one, which means it adds some extra latency overhead. + +All API consistency options will leverage replicas, but the ones that benefit the most are those that involve some level of staleness as it increases the odds a revision has replicated. +`minimize_latency`, `at_least_as_fresh`, and `at_exact_snapshot` consistency modes have the highest chance of being redirected to a replica. + +SpiceDB does not support MySQL replicas behind a load-balancer: you may only list replica hosts individually. +Failing to adhere to this would compromise consistency guarantees. +When multiple host URIs are provided, they will be queried using round-robin. +Please note that the maximum number of MySQL replica host URIs to list is 16. + +Read replicas are configured with the `--datastore-read-replica-*` family of flags. + +### Configuration + +#### Required Parameters + +| Parameter | Description | Example | +| -------------------- | ------------------------------------------ | ------------------------------------------------------------------------------ | +| `datastore-engine` | the datastore engine | `--datastore-engine=mysql` | +| `datastore-conn-uri` | connection string used to connect to MySQL | `--datastore-conn-uri="user:password@(localhost:3306)/spicedb?parseTime=True"` | + + + SpiceDB requires `--datastore-conn-uri` to contain the query parameter + `parseTime=True`. + + +#### Optional Parameters + +| Parameter | Description | Example | +| ------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | +| `datastore-conn-pool-read-max-idletime` | Maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-read-max-idletime=30m0s` | +| `datastore-conn-pool-read-max-lifetime` | Maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-read-max-lifetime=30m0s` | +| `datastore-conn-pool-read-max-lifetime-jitter` | Waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection | `--datastore-conn-pool-read-max-lifetime-jitter=6m` | +| `datastore-conn-pool-read-max-open` | Number of concurrent connections open in a remote datastore's connection pool (default 20) | `--datastore-conn-pool-read-max-open=20` | +| `datastore-conn-pool-read-min-open` | Number of minimum concurrent connections open in a remote datastore's connection pool (default 20) | `--datastore-conn-pool-read-min-open=20` | +| `datastore-conn-pool-write-healthcheck-interval` | Amount of time between connection health checks in a remote datastore's connection pool (default 30s) | `--datastore-conn-pool-write-healthcheck-interval=30s` | +| `datastore-conn-pool-write-max-idletime` | Maximum amount of time a connection can idle in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-write-max-idletime=30m0s` | +| `datastore-conn-pool-write-max-lifetime` | Maximum amount of time a connection can live in a remote datastore's connection pool (default 30m0s) | `--datastore-conn-pool-write-max-lifetime=30m0s` | +| `datastore-conn-pool-write-max-lifetime-jitter` | Waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection | `--datastore-conn-pool-write-max-lifetime-jitter=6m` | +| `datastore-conn-pool-write-max-open` | Number of concurrent connections open in a remote datastore's connection pool (default 10) | `--datastore-conn-pool-write-max-open=10` | +| `datastore-conn-pool-write-min-open` | Number of minimum concurrent connections open in a remote datastore's connection pool (default 10) | `--datastore-conn-pool-write-min-open=10` | +| `datastore-query-split-size` | The (estimated) query size at which to split a query into multiple queries | `--datastore-query-split-size=5kb` | +| `datastore-gc-window` | Sets the window outside of which overwritten relationships are no longer accessible | `--datastore-gc-window=1s` | +| `datastore-revision-fuzzing-duration` | Sets a fuzzing window on all zookies/zedtokens | `--datastore-revision-fuzzing-duration=50ms` | +| `datastore-mysql-table-prefix string` | Prefix to add to the name of all SpiceDB database tables | `--datastore-mysql-table-prefix=spicedb` | +| `datastore-readonly` | Places the datastore into readonly mode | `--datastore-readonly=true` | +| `datastore-read-replica-conn-uri` | Connection string used by datastores for read replicas; only supported for postgres and MySQL | `--datastore-read-replica-conn-uri="postgres://postgres:password@localhost:5432/spicedb\"` | +| `—datastore-read-replica-credentials-provider-name` | Retrieve datastore credentials dynamically using aws-iam | | +| `datastore-read-replica-conn-pool-read-healthcheck-interval` | amount of time between connection health checks in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-healthcheck-interval=30s` | +| `datastore-read-replica-conn-pool-read-max-idletime` | maximum amount of time a connection can idle in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-max-idletime=30m` | +| `datastore-read-replica-conn-pool-read-max-lifetime` | maximum amount of time a connection can live in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-max-lifetime=30m` | +| `datastore-read-replica-conn-pool-read-max-lifetime-jitter` | waits rand(0, jitter) after a connection is open for max lifetime to actually close the connection to a read replica(default: 20% of max lifetime) | `--datastore-read-replica-conn-pool-read-max-lifetime-jitter=6m` | +| `datastore-read-replica-conn-pool-read-max-open` | number of concurrent connections open in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-max-open=20` | +| `datastore-read-replica-conn-pool-read-min-open` | number of minimum concurrent connections open in a read-only replica datastore's connection pool | `--datastore-read-replica-conn-pool-read-min-open=20` | + +## memdb + +### Usage Notes + +- Fully ephemeral; _all_ data is lost when the process is terminated +- Intended for usage with SpiceDB itself and testing application integrations +- Cannot be ran highly-available as multiple instances will not share the same in-memory data + + + If you need an ephemeral datastore designed for validation or testing, see the + test server system in [Validating and Testing] + + +[validating and testing]: /spicedb/modeling/validation-testing-debugging + +### Developer Notes + +- Code can be found [here][memdb-code] +- Documentation can be found [here][memdb-godoc] +- Implements its own [MVCC][mvcc] model by storing its data with transaction IDs + +[memdb-code]: https://github.com/authzed/spicedb/tree/main/internal/datastore/memdb +[memdb-godoc]: https://pkg.go.dev/github.com/authzed/spicedb/internal/datastore/memdb + +### Configuration + +#### Required Parameters + +| Parameter | Description | Example | +| ------------------ | -------------------- | --------------------------- | +| `datastore-engine` | the datastore engine | `--datastore-engine memory` | + +#### Optional Parameters + +| Parameter | Description | Example | +| ------------------------------------- | ----------------------------------------------------------------------------------- | -------------------------------------------- | +| `datastore-revision-fuzzing-duration` | Sets a fuzzing window on all zookies/zedtokens | `--datastore-revision-fuzzing-duration=50ms` | +| `datastore-gc-window` | Sets the window outside of which overwritten relationships are no longer accessible | `--datastore-gc-window=1s` | +| `datastore-readonly` | Places the datastore into readonly mode | `--datastore-readonly=true` | diff --git a/content/spicedb/concepts/expiring-relationships.mdx b/content/spicedb/concepts/expiring-relationships.mdx new file mode 100644 index 0000000..36832ce --- /dev/null +++ b/content/spicedb/concepts/expiring-relationships.mdx @@ -0,0 +1,175 @@ +import { Callout } from "nextra/components"; +import { InlinePlayground } from "@/components/playground"; + +# Writing Relationships that Expire + +A common use case is granting a user access to a resource for a limited time. + +Before SpiceDB v1.40, [caveats] were the recommended way to support time-bound permissions, but that has some limitations: + +[caveats]: caveats + +- It requires clients to provide the `now` timestamp. + This is additional complexity for clients. +- Expired caveats are not automatically garbage collected. + This can lead to many caveated relationships in the system and increase the costs of loading and evaluating those into the runtime. + +After SpiceDB v1.4.0, and if you need to grant temporary access to a resource, you can do so by writing relationships that expire after a certain time. + +The time must be specified in [RFC 3339 format]. + +[RFC 3339 format]: https://datatracker.ietf.org/doc/html/rfc3339#section-5.8 + + + The clock used to determine if a relationship is expired is that of the + underlying SpiceDB datastore. This gets trickier when using distributed + databases like CockroachDB or Spanner, where clocks have an uncertainty range. + When operating your own database, it's key to keep node clocks in sync - we + recommend services like [Amazon Time Sync + Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html). + You should evaluate the impact of clock drift in your application. + + +## Schema + +To enable expiration in your schema, add a `use expiration` clause to the top of the file. +Then the relations subject to expiration are marked using ` with expiration`: + +```zed +use expiration + +definition user {} + +definition resource { + relation viewer: user with expiration +} +``` + +## API + +To write a relationship that expires, use the `WriteRelationships` or `BulkImportRelationships` APIs, and set the `OptionalExpiresAt` field in the relationship: + +```textproto +WriteRelationshipsRequest { + Updates: [ + RelationshipUpdate{ + Operation: TOUCH + Relationship: { + Resource: { + ObjectType: "resource", + ObjectId: "someresource", + }, + Relation: "viewer", + Subject: { + ObjectType: "user", + ObjectId: "sarah", + }, + OptionalExpiresAt: "2022-12-31T23:59:59Z" + } + } + ] +} +``` + + + When using the WriteRelationships API, it is recommended to always use the + TOUCH operation to create and update expiring relationships. If a relationship + has expired but has not yet been garbage collected, using the CREATE operation + will return an error for that relationship. + + +## Playground + +To write a relationship that expires, use the following format: + +```yaml +resource:someresource#viewer@user:anne[expiration:2025-12-31T23:59:59Z] +``` + +or specify expirations in the `Expiration` column in the Relationship grid editor. + +
+ + +## zed + +To write a relationship that expires, use the `--expiration-time` flag: + +```shell zed +zed relationship create resource:someresource viewer user:anne --expiration-time "2025-12-31T23:59:59Z" +``` + +## Garbage Collection + +As soon as a relationship expires, it will no longer be used in permission checks. +However, the relationship in the datastore is not deleted right then, but rather is subject to garbage collection. + +Reclaiming expiring relationships is governed by the same mechanism (and flags) as the deletion of the history of +relationship changes that powers SpiceDB's own MVCC (Multi-Version Concurrency Control) and heavily depends on +the datastore chosen. + +- Datastores like Spanner and CockroachDB have built-in support for expiring SQL rows, so the database does Garbage Collection. + In both cases, expired relationships will be reclaimed after 24 hours, which can't be changed without directly manipulating the SQL schema. +- Datastores like Postgres and MySQL support it using the same GC job that reclaims old relationship versions, which runs every 5 minutes. + Unlike Spanner and CockroachDB, you can govern the GC window with the corresponding flags. + Relationships will be reclaimed after 24 hours by default. + + + The GC Window should be adjusted according to the application's needs. How far + back in time does your application need to go? If this is a common use case, + we recommend drastically reducing the GC window (e.g., 1 hour or 30 minutes). + This means SpiceDB will have to evaluate less data when serving authorization + checks, which can improve performance drastically in large-scale deployments. + + +## Migrating Off Of Expiration With Caveats + +If you implemented expiration using caveats, this section describes migrating to the new expiration feature. + +1. Rename your caveat if you had named it `expiration` +2. Add the new subject type to your relation, and also add a combination where both are used. For example: + + ```zed + caveat ttl(timeout duration, now string, timeout_creation_timestamp string) { + timestamp(now) - timestamp(timeout_creation_timestamp) < timeout + } + + definition user {} + + definition resource { + relation viewer: user with ttl + } + ``` + + Becomes: + + ```zed + use expiration + + caveat ttl(timeout duration, now string, timeout_creation_timestamp string) { + timestamp(now) - timestamp(timeout_creation_timestamp) < timeout + } + + definition user {} + + definition resource { + relation viewer: user with ttl | user with expiration | user with ttl and expiration + } + ``` + +3. Migrate all relationships to use both the caveat and the new expiration. + This is needed because only one relationship is allowed for a resource/permission/subject combination. +4. Validate that the new expiration feature works as expected by not providing the context for evaluating the `ttl` caveat. +5. Once validated, migrate completely to the new expiration feature by writing all relationships with only expiration + and without caveat. +6. Drop the caveat from your schema once the migration is completed: + + ```zed + use expiration + + definition user {} + + definition resource { + relation viewer: user with expiration + } + ``` diff --git a/content/spicedb/concepts/reflection-apis.mdx b/content/spicedb/concepts/reflection-apis.mdx new file mode 100644 index 0000000..687e410 --- /dev/null +++ b/content/spicedb/concepts/reflection-apis.mdx @@ -0,0 +1,134 @@ +import { Callout } from "nextra/components"; +import { InlinePlayground } from "@/components/playground"; + +# Reflection APIs + +The [Reflection APIs] in SpiceDB (starting at version v1.33.0) provide the ability to reflect on the stored schema +and type information to answer questions about the schema itself, as well as its permissions and +relations. + +[Reflection APIs]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.SchemaService.ReflectSchema + +## ReflectSchema + +`ReflectSchema` provides an API-driven means of receiving the structure of the current +schema stored in SpiceDB. + +It is designed primarily to allow callers to make dynamic decisions based on the structure of the +schema, such as being able to see all the permissions defined for a particular type of resource. + +```proto +ReflectSchemaRequest{} + +ReflectSchemaResponse{ + Definitions: []{ + { Name: "user" }, + { + Name: "organization", + Relations: []{ + { Name: "member", SubjectTypes: []{ { Name: "user" } }, ... }, + }, + }, + { + Name: "resource", + Comment: "// resource is some kind of resource", + Relations: []{ ... }, + Permissions: []{ ... }, + }, + }, +} +``` + +### Filtering + +`ReflectSchemaRequest` also includes support for filters which can be used to filter +the response to a specific subset of the schema: + +```proto +ReflectSchemaRequest{ + OptionalFilters: []{ + { + OptionalDefinitionNameFilter: "a" // filter to defs starting with `a` + }, + }, +} +``` + +## DiffSchema + +`DiffSchema` provides an API-driven means of comparing the currently stored schema +in SpiceDB to another schema. + +This API is useful for tooling such as CI/CD that needs to determine what changes, if any, +exist between the current schema and a future schema. + +```proto +DiffSchema{ + ComparisonSchema: """ + definition user {} + + // an added comment + definition organization { + relation member: user + } + + // resource is some kind of resource + definition resource { + relation viewer: user + relation editor: user + + relation org: organization + + permission edit = editor + permission view = viewer + editor + org->member + } + """ +} + +ReflectSchemaResponse{ + Diffs: []{ + { DefinitionDocCommentChanged: { Name: "organization", ... } }, + { PermissionExprChanged: { Name: "view", ... } }, + } +} +``` + +## DependentRelations + +`DependentRelations` is a reflection API that provides the list of relations and +permissions that are used to compute a particular permission. + +```proto +DependentRelationsRequest{ + DefinitionName: "resource" + PermissionName: "view" +} + +DependentRelationsResponse{ + Relations: []{ + { DefinitionName: "organization", RelationName: "member", IsPermission: false}, + { DefinitionName: "resource", RelationName: "org", IsPermission: false}, + { DefinitionName: "resource", RelationName: "viewer", IsPermission: false}, + { DefinitionName: "resource", RelationName: "edit", IsPermission: true}, + { DefinitionName: "resource", RelationName: "editor", IsPermission: false}, + } +} +``` + +## ComputablePermissions + +`ComputablePermissions` is the inverse of `DependentRelations`: it helps +to determine any permissions impacted by a change to a relation or permission. + +```proto +ComputablePermissionsRequest{ + DefinitionName: "resource" + RelationName: "viewer" +} + +ComputablePermissionsResponse{ + Permissions: []{ + { DefinitionName: "resource", RelationName: "view", IsPermission: true}, + } +} +``` diff --git a/content/spicedb/concepts/relationships.mdx b/content/spicedb/concepts/relationships.mdx new file mode 100644 index 0000000..f648bb1 --- /dev/null +++ b/content/spicedb/concepts/relationships.mdx @@ -0,0 +1,214 @@ +import { Callout } from "nextra/components"; + +# Relationships + +In SpiceDB, a functioning Permissions System is the combination of [Schema], which defines the structure of data, and Relationships, which are the data. + +[schema]: ./schema + +## Understanding Relationships + +Let's start with a simple schema that models a document sharing system: + +```zed +definition user {} + +definition team { + relation member: user +} + +definition document { + # both specific users and all members of specific teams can edit the document + relation editor: user | team#member +} +``` + +This schema defines three types of [Objects]: `user`, `team` and `document`. The `document` type has one **relation** defined on it: `editor`. + +A **relation** is like a class definition in Object-Oriented programming or a type in a strongly-typed language: it represents a possible type of connection defined in your schema. For example: "documents have editors". + +A **relationship** is a specific instance of a relation - it's the actual data. For example: "user `emilia` is an editor of document `readme`" + +### Relationship Syntax + +The syntax used for relationships in the [paper that popularized ReBAC](./zanzibar) and that we use throughout this website is: + +``` +document:readme#editor@user:emilia +``` + +Let's break this down: + +``` + resource subject + ID type + \ˍˍˍˍˍ\ \ˍˍ\ + document:readme#editor@user:emilia +/¯¯¯¯¯¯¯/ /¯¯¯¯¯/ /¯¯¯¯¯/ +resource relation subject + type ID +``` + +This relationship can be read as: "user `emilia` is an `editor` of document `readme`". Note how this is connecting two specific objects. + +We can also write relationships that link one object to a set of objects. + +``` +document:readme#editor@team:engineering#member +``` + +Let's break this down: + +``` + resource subject + ID type + \ˍˍˍˍˍ\ \ˍˍ\ + document:readme#editor@team:engineering#member +/¯¯¯¯¯¯¯/ /¯¯¯¯¯/ /¯¯¯¯¯¯¯¯¯¯//¯¯¯¯¯/ +resource relation subject subject + type ID relation +``` + +This relationship can be read as: "every object that has the `member` relation to `team:engineering` is an `editor` of document `readme`". + + + In a real system, Object IDs are most likely a computer-friendly string than something human readable. + Many use-cases use UUIDs or unsigned integers representing the primary key from that data's canonical datastore. + +Users are no exception to this pattern and can be [represented in various ways][modeling-users], such as the `sub` field of an JWT from an Identity Provider. + +Regardless of their representation, Object IDs must be **unique and stable** within the set of IDs for an Object Type. + + + +### Graph traversals + +At its core, authorization logic fundamentally reduces to asking: + +> Is this actor allowed to perform this action on this resource? + +For example: "Is user `emilia` allowed to `edit` document `readme`?" + +If you had these relationships written in SpiceDB: + +- `document:readme#editor@user:emilia` + +Then the answer is trivial: yes, `emilia` can edit the document. + +If, instead, you had these relationships written in SpiceDB: + +- `team:engineering#member@user:emilia` - emilia is on the engineering team +- `document:readme#editor@team:engineering#member` - every member on the engineering team can edit the readme + +When checking "Can user `emilia` edit document `readme`?", SpiceDB: + +1. Starts at `document:readme#editor` +2. Follows the `editor` relation to find `team:engineering#member` +3. Follows the `member` relation to find `user:emilia` + +[//]: # "TODO add drawing" + +Note how we followed a chain of relationships to answer the question. Or, put differently, we traversed a [graph]. + +The real power of ReBAC comes from transforming authorization questions into [graph reachability] problems, and then answering them efficiently: + +> Is there a chain of **relationships** starting at this resource and relation that ultimately reaches this subject? + +This is what makes relationships powerful: they are both **the question you ask** ("does this relationship path exist?") and, when you write many of them together, **they form the answer** (by creating paths through the graph that SpiceDB can traverse). + +[Objects]: ./schema#object-type-definitions +[Permission]: ./schema#permissions +[Relation]: ./schema#relations +[graph]: https://en.wikipedia.org/wiki/Graph_(abstract_data_type) +[graph reachability]: https://en.wikipedia.org/wiki/Reachability +[breadth-first search]: https://en.wikipedia.org/wiki/Breadth-first_search +[depth-first search]: https://en.wikipedia.org/wiki/Depth-first_search +[modeling-users]: ../modeling/representing-users + +## Writing Relationships + +It is the application's responsibility to keep the relationships within SpiceDB up-to-date and reflecting the state of the application; how an application does so can vary based on the specifics of the application, so below we outline a few approaches. + + + Want to learn more about writing relationships to SpiceDB, the various strategies and their pros and cons? + +Read our [blog post about writing relationships][write-blog]. + +[write-blog]: https://authzed.com/blog/writing-relationships-to-spicedb/ + + + +### SpiceDB-only relationships + +Sometimes an application does not even need to store permissions-related relationships in its relational database. + +Consider a permissions system that allows for teams of users to be created and used to access a resource. +In SpiceDB's schema, this could be represented as: + +```zed +definition user {} + +definition team { + relation member: user +} + +definition resource { + relation reader: user | team#member + permission view = reader +} +``` + +In the above example, the relationship between a resource and its teams, as well as a team and its members does not need to be stored in the application's database **at all**. + +Rather, this information can be stored solely in SpiceDB, and accessed by the application via a [ReadRelationships] or [ExpandPermissionsTree] call when necessary. + +[ReadRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#ReadRelationships +[ExpandPermissionsTree]: https://buf.build/authzed/api/docs/main:authzed.api.v1#ExpandPermissionTree + +### Two writes & commit + +The most common and straightforward way to store relationships in SpiceDB is to use a 2 phase commit-like approach, making use of a transaction from the relational database along with a [WriteRelationships] call to SpiceDB. + +[WriteRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#WriteRelationships + +```python filename='Example of a 2PC-like approach' +try: + tx = db.transaction() + + # Write relationships during a transaction so that it can be aborted on exception + resp = spicedb_client.WriteRelationships(...) + + tx.add(db_models.Document( + id=request.document_id, + owner=user_id, + zedtoken=resp.written_at + )) + tx.commit() +except: + # Delete relationships written to SpiceDB and re-raise the exception + tx.abort() + spicedb_client.DeleteRelationships(...) + raise +``` + +### Streaming commits + +Another approach is to stream updates to both a relational database and SpiceDB via a third party streaming system such as [Kafka], using a pattern known as [Command Query Responsibility Segregation] (CQRS) + +[Kafka]: https://kafka.apache.org/ +[Command Query Responsibility Segregation]: https://www.confluent.io/blog/event-sourcing-cqrs-stream-processing-apache-kafka-whats-connection/ + +In this design, any updates to the relationships in both databases are published as **events** to the streaming service, with each event being consumed by a system which performs the updates in both the database and in SpiceDB. + +### Asynchronous Updates + + + Before adopting an asynchronous system, you should deeply consider the [consistency] implications. + +[consistency]: ./consistency + + + +If an application does not require up-to-the-second consistent permissions checking, and some replication lag in permissions checking is acceptable, then asynchronous updates of the relationships in SpiceDB can be used. + +In this design, a synchronization process, typically running in the background, is used to write relationships to SpiceDB in reaction to any changes that occur in the primary relational database. diff --git a/content/spicedb/concepts/schema.mdx b/content/spicedb/concepts/schema.mdx new file mode 100644 index 0000000..e0c8079 --- /dev/null +++ b/content/spicedb/concepts/schema.mdx @@ -0,0 +1,524 @@ +import { Callout } from "nextra/components"; +import YouTube from "react-youtube"; +import { InlinePlayground } from "@/components/playground"; + +# Schema Language Reference + +A SpiceDB schema defines the types of objects found your application, how those objects can relate to one another, and the permissions that can be computed off of those relations. + +This page is a reference guide that uses examples that are loosely based on trying to write a schema for Google Docs. For a detailed guide on how to write your own schema from scratch, see [Developing a Schema]. + +The schema language's extension for use on a file system is `.zed`, and you can experiment with schemas in real-time with the [Playground](https://play.authzed.com). + +[Developing a Schema]: ../modeling/developing-a-schema + +## Definitions + +The top level of a Schema consists of one or more [Object Type definitions](#object-type-definitions) and zero or more [Caveats](./caveats). + +### Object Type Definitions + +An Object Type definition is used to represent classes of objects. + +It might help to think about Object Type definitions as similar to a class definition in an Object-Oriented programming language. When you [write relationships](relationships), you will "instantiate" those classes. + +```zed +definition document {} + +definition group {} + +definition user {} +``` + +You can add prefixes to each definition, which is useful (for example) if you want to write a schema that supports multiple products within your organization. + +```zed +definition docs/document {} + +definition docs/folder {} + +definition iam/group {} + +definition iam/user {} +``` + +### Caveat Definitions + +Caveats are expressions that can return true or false, and they can be attached (by name) to relationships. + +They allow relationships to be defined conditionally: when executing permission checks (e.g. [CheckPermission]), the caveated relationship will only be considered present if the caveat expression evaluates to true at the time you run the `CheckPermission`. + +[CheckPermission]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.CheckPermission + +```zed +caveat ip_allowlist(user_ip ipaddress, cidr string) { + user_ip.in_cidr(cidr) +} + +definition document { + relation reader: user with ip_allowlist +} +``` + +See the [Caveats documentation][caveats] to learn more. + +[caveats]: ./caveats + +## Relations + +A `relation` defines how two objects (or an object and subject) can relate to one another. +For example, a `reader` on a document, or a `member` of a group. + +Relations are always defined with a _name_ and one or more allowed _types_ of objects that can be the subjects of that relation. + +### Relations to specific objects + +In the schema below, the `member` relation on `group` and the `reader` relation on `document` both allow only concrete relationships to specific `users`. + +```zed +definition user {} + +definition group { + /** + * member defines who is part of a group + */ + relation member: user +} + +definition document { + /** + * reader relates a user that is a reader on the document + */ + relation reader: user +} +``` + +### Subject Relations + +In the example below, the `owner` relation allows you to grant "roles" to specific subjects and also _sets_ of subjects. + +```zed +definition user {} + +definition group { + /** + * member defines who is part of a group + */ + relation member: user +} + +definition document { + /** + * an owner can be a specific user, or the set of members which have that relation to the group. + * so you can write relationships such as: + * - document:budget#owner@user:anne, or + * - document:budget#owner@group:finance#member + */ + relation owner: user | group#member +} +``` + +### Wildcards + +Relations can specify wildcards to indicate that a grant can be made to the resource _type_ as a whole, rather than a particular resource. +This allows _public_ access to be granted to a particular subject type. + +For example, the following schema allows one to specify that _all_ users can be granted the ability to view a document: + +```zed +definition user {} + +definition document { + /** + * viewer can be granted to a specific user or granted to *all* users. + */ + relation viewer: user | user:* +} +``` + +To be made public, a wildcard relationship must be written linking the specific document to _any_ user: + +```relationship +document:public#viewer@user:* +``` + +Now _any_ user (present or future) that exists in SpiceDB is a `viewer` of object `document:public`. + + + Be **very careful** with wildcard support in your schema! **Only** grant it to + read permissions, unless you intend to allow for universal writing. + + +### Naming Relations + +Relations define how one object relates to another object/subject, and thus **relations should be named as nouns**, read as `{relation name} (of the object)`. + +Examples: + +| Name | Read as | +| -------- | ---------------------- | +| `reader` | reader of the document | +| `writer` | writer of the document | +| `member` | member of the group | +| `parent` | parent of the folder | + +## Permissions + +A permission defines a _computed_ set of subjects that have a permission of some kind on the object. +For example, is a user within the set of users that can `edit` a document. + +Permissions are always defined with a _name_, and an _expression_ with one or more _operations_ defining how that permission's allowed set of subjects is computed. + +```zed +definition user {} + +definition document { + relation writer: user + relation reader: user + + /** + * edit determines whether a user can edit the document. if you are writer, you can edit. + */ + permission edit = writer + + /** + * view determines whether a user can view the document. If you are reader or a writer, you can view. + */ + permission view = reader + writer +} +``` + + + When writing relationships in SpiceDB, you cannot write a relationship that + references a *permission*, only a relationship that references a *relation*. + This means that it's easy to change a permission, but not a relation. + + +### Operations + +Permissions support four kinds of operations: **union**, **intersection**, **exclusion** and **arrows**. + + +**Important: Union Precedence** + +For historical reasons, union (`+`) takes precedence over intersection (`&`) and exclusion (`-`), which can lead to unexpected results. +For example, `a + b & c` is evaluated as `(a + b) & c`, not `a + (b & c)`. + +We intend to add a flag to fix this precedence issue in the future. + +It is highly recommended to either: + +- Break complex expressions into intermediate permissions: + + ```zed + permission writers_and_admins = writer & admin + permission view = reader + writers_and_admins + ``` + +- Use explicit parentheses to clarify precedence: + + ```zed + permission view = reader + (writer & admin) + ``` + + + +#### `+` (Union) + +Unions together the relations/permissions referenced. + +Union is the most common operation and is used to join different relations or permissions together to form a set of allowed subjects. + +For example, to grant a permission `admin` to a document, a user must be either a `reader` or a `writer` (or both) of the document: + +```zed +permission admin = reader + writer +``` + +#### `&` (Intersection) + +Intersects the set of subjects found for the relations/permissions referenced. + +Intersection allows for a permission to only include those subjects that were found in **both** relations/permissions. + +For example, to grant a permission `admin` to a document, a user must be a `reader` AND a `writer` of the document: + +```zed +permission admin = reader & writer +``` + +#### `-` (Exclusion) + +Excludes the set of subjects found for the right side relation/permission from those found in the left side relation/permission. + +Exclusion allows for computing the difference between two sets of relations/permissions. + +For example, to grant a permission to a user that is `reader` but not the `writer` of a document: + +```zed +permission can_only_read = reader - writer +``` + +#### `->` (Arrow) + +Imagine a schema where a document is found under a folder: + +```zed +definition user {} + +definition folder { + relation reader: user +} + +definition document { + /** + * parent_folder defines the folder that holds this document + */ + relation parent_folder: folder +} +``` + +We likely want to allow any `reader` of the `parent_folder` to **also** be a reader of the `document`. + +To accomplish this, we can use the arrow operator to indicate that if a `user` has the `read` permission on the `parent_folder`, then the `user` can `read` the `document`: + +```zed {11} +definition user {} + +definition folder { + relation reader: user + permission read = reader +} + +definition document { + relation parent_folder: folder + + permission read = parent_folder->read +} +``` + +The expression `parent_folder->read` indicates to "walk" from the `parent_folder` of the `document`, and then to include the subjects found for the `read` permission of that folder. + +Making use of a `union`, we can also include the local `reader` relation, allowing the `read` permission on a document to check whether a user is a `reader` of a document or a `reader` of its parent folder. + +```zed +definition user {} + +definition folder { + relation reader: user + permission read = reader +} + +definition document { + relation parent_folder: folder + relation reader: user + + /** + * if a user has the reader relation, or + * if a user has the read permission on the parent_folder, + * then the user can read the document + */ + permission read = reader + parent_folder->read +} +``` + + + It is _recommended_ that the right side of all arrows refer to + **permissions**, instead of relations, as this allows for easy nested + computation, and is more readable. + + +##### Subject relations and Arrows + +Arrows operate on the **object** of the subject(s) found on a `relation`. They do **not** operate on the relation/permission of a subject, **even if the subject refers to a relation or permission**. + +For example, in: + +```zed +definition resource { + relation parent: group#member + permission someperm = parent->something +} +``` + +The arrow `parent->something` refers to the `something` permission on the **group**, and `#member` will be ignored. + + + It is recommended to not use arrows over relations that allow for subject + relations without noting that fact via a comment. Why? In one word: + performance. If arrows operated over the subject's relation or permission, a + full LookupSubjects call would be necessary for the arrow to correctly "walk", + which would make these CheckPermission requests potentially *incredibly* + expensive. + + +#### .any (Arrow) + +`.any` is an alias for the [arrow](#--arrow) operation. `parent_folder.any(read)` is equivalent to `parent_folder->read`: + +```zed {13} /parent_folder.any(read)/ +definition user {} + +definition folder { + relation reader: user + permission read = reader +} + +definition document { + relation parent_folder: folder + relation reader: user + + permission read = reader + parent_folder->read + permission read_same = reader + parent_folder.any(read) +} +``` + +#### .all (Intersection Arrow) + +`.all` defines an _intersection_ arrow. + +Similar to the standard [arrow](#--arrow), it walks over all subjects on the referenced relation to a referenced permission/relation. +But unlike the standard [arrow](#--arrow), intersection arrow requires that **all** subjects found on the left side of the arrow have the requested permission/relation. + +For example, imagine a schema where a `document` is viewable by a `user` if they are a `member` of any `group` for the document: + +```zed {9} +definition user {} + +definition group { + relation member: user +} + +definition document { + relation group: group + permission view = group->member +} +``` + +If the goal was to instead allow documents to be viewable only if the `user` is a member of _all_ the document's `group`s, the intersection arrow operator (`.all`) could be used: + +```zed {9} /group.all(member)/ +definition user {} + +definition group { + relation member: user +} + +definition document { + relation group: group + permission view = group.all(member) +} +``` + +In the above example, the user must be in the `member` relation for _all_ groups defined on the `group` relation of a document in order to have the `view` permission. + + + Intersection arrows can impact performance since they require loading **all** + results for the arrow. + + +### Naming Permissions + +Permissions define a set of objects that can perform an action or have some attribute, and thus **permissions should be named as verbs or nouns**, read as `(is/can) {permission name} (the object)`. + +Examples: + +| Name | Read as | +| -------- | ----------------------- | +| `read` | can read the object | +| `write` | can write the object | +| `delete` | can delete the object | +| `member` | is member of the object | + + + You'll note that we also used `member` above in the relation example. Defining + `member` as a **permission** might be found when you have multiple "ways" a + subject can be a member of a resource, thus changing it from a simple relation + to a _computed_ set of subjects. + + +## Comments + +### Documentation Comments + + + It is **highly** recommended to put doc comments on all definitions, relations + and permissions. + + +```zed +/** + * something has some doc comment + */ +``` + +### Non-doc comments + +```zed +// Some comment +/* Some comment */ +``` + +## Full Example + + + +## Common Patterns + +### Group membership + +Apply specific users or members of a group to a permission on an object type. + +In this example, a group can have users as admins and as members. +Both admins and members are considered to have membership in the group. +A role can be applied to individual users and groups. +All individually applied users as well as members for applied groups will have the `allowed` permission. + + + +### Global admin permissions + +Given an organizational hierarchy of objects where (regular) admin users may exist for a single level of the hierarchy, apply permissions for a set of super-admin users that span across all levels of the hierarchy. + +In lieu of adding a super_admin relation on every object that can be administered, add a root object to the hierarchy, in this example platform. +Super admin users can be applied to platform and a relation to platform on top level objects. +Admin permission on resources is then defined as the direct owner of the resource as well as through a traversal of the object hierarchy to the platform super admin. + + + +### Synthetic relations + +Relation traversals can be modeled using intermediate, synthetic relations. + +Given the example hierarchy below, where a portfolio can have folders and folders can have documents, we’d like a viewer of a portfolio to also be able to read documents contained in its folders. +The read on documents could be thought of as: + +``` +reader + parent_folder->reader + parent_folder->parent_portfolio->read +``` + +Synthetic relations can simulate multiple walks across permissions and relations. + + + +### Recursive permissions + +Given a nested set of objects, apply a permission on an object to its descendant objects. + +In this example, a folder can have users with read permission. +Additionally, users that can read the parent folder can also read the current folder. +Checking read permission on a folder will recursively consider these relations as the answer is computed. + + + + + Note that since `parent->read` calls the same `read` permission, it will form + a recursive lookup across the chain of parent folder(s). + + +### Recursive permissions across different resource types + +If a non-recursive resource is used as the starting point for a recursive lookup, it is +**very important** that the permission name used on the right side of the arrow is the **same** in both the starting resource type and the parent resource type(s): + + diff --git a/content/spicedb/concepts/watch.mdx b/content/spicedb/concepts/watch.mdx new file mode 100644 index 0000000..faa7c4b --- /dev/null +++ b/content/spicedb/concepts/watch.mdx @@ -0,0 +1,181 @@ +import { Callout } from "nextra/components"; +import { InlinePlayground } from "@/components/playground"; + +# Watching Relationship Changes + +The [Watch API] in SpiceDB enables clients to monitor changes made to [Relationships] within the system. + +Watch events are generated when relationships are created, touched, or deleted through the [WriteRelationships], [DeleteRelationships] or [ImportBulkRelationships] APIs. + +[Watch API]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.WatchService +[Relationships]: relationships +[WriteRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.WriteRelationships +[DeleteRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.DeleteRelationships +[ImportBulkRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.ImportBulkRelationships + +## Calling Watch + +To receive watch changes, call the `Watch` API. + +This is a streaming API that will continually return all updates to relationships from the time at which the API call was made. + +```py +from authzed.api.v1 import ( + Client, WatchRequest +) +from grpcutil import bearer_token_credentials + +client = Client( + "localhost:50051", + bearer_token_credentials("your-token-here"), +) + +watcher = client.Watch(WatchRequest{}) +for resp in watcher: + # process the update +``` + +### Receiving historical updates + +Historical updates (i.e. relationship changes in the past) can be retrieved by specifying a [ZedToken] in the `WatchRequest`: + +[ZedToken]: consistency#zedtokens + +```py +from authzed.api.v1 import ( + Client, WatchRequest +) +from grpcutil import bearer_token_credentials + +client = Client( + "localhost:50051", + bearer_token_credentials("your-token-here"), +) + +watcher = client.Watch(WatchRequest( + optional_start_cursor=last_zed_token +)) +for resp in watcher: + # process the update +``` + + + Historical changes can only be requested until the configured garbage + collection window on the underlying datastore. This is typically 24 hours, but + may differ based on the datastore used. + + +### Ensuring continuous processing + +Because Watch is a streaming API, your code should handle disconnections gracefully. + +To ensure continuous processing, the calling client _should_ execute the `Watch` call in a loop, sending in the last received [ZedToken] from `ChangesThrough` if the call disconnects: + +```py +from authzed.api.v1 import ( + Client, WatchRequest +) +from grpcutil import bearer_token_credentials + +client = Client( + "localhost:50051", + bearer_token_credentials("your-token-here"), +) + +last_zed_token = None +while not_canceled: + try: + watcher = client.Watch(WatchRequest( + optional_start_cursor=last_zed_token + )) + for resp in watcher: + # process the update + last_zed_token = resp.changes_through + except Exception: + # log exception + continue +``` + +If your datastore supports checkpoints, you can also request them. + +This will help keep the stream alive during periods of inactivity, which is helpful if your SpiceDB instance sits behind a proxy that terminates idle connections. + +```py +from authzed.api.v1 import ( + Client, + WatchRequest, +) +from authzed.api.v1.watch_service_pb2 import WATCH_KIND_INCLUDE_CHECKPOINTS +from grpcutil import bearer_token_credentials + +client = Client( + "localhost:50051", + bearer_token_credentials("your-token-here"), +) + +last_zed_token = None +while not_canceled: + try: + watcher = client.Watch(WatchRequest( + optional_start_cursor=last_zed_token, + optional_update_kinds=[WATCH_KIND_INCLUDE_CHECKPOINTS] + )) + for resp in watcher: + # process the update + last_zed_token = resp.changes_through + except Exception: + # log exception + continue +``` + +## Transaction Metadata + +SpiceDB's [WriteRelationships] and [DeleteRelationships] APIs support an optional metadata block called the [Transaction Metadata]. + +When `optional_transaction_metadata` is specified on the [WriteRelationships] or [DeleteRelationships] request, it will be stored and returned alongside the relationships in the Watch API: + +```py +from authzed.api.v1 import ( + Client, WatchRequest +) +from grpcutil import bearer_token_credentials + +client = Client( + "localhost:50051", + bearer_token_credentials("your-token-here"), +) +client.WriteRelationships(WriteRelationshipsRequest( + updates=[ + RelationshipUpdate( + operation=RelationshipUpdate.Operation.OPERATION_CREATE, + relationship=Relationship( + resource=ObjectReference(object_type="document", object_id="somedoc"), + relation="viewer", + subject=SubjectReference( + object=ObjectReference( + object_type="user", + object_id="tom", + ) + ), + ), + ), + ], + optional_transaction_metadata=Struct({"request_id": "12345"}), +}) + +... + +WatchResponse{ + Updates: [ + { Relationship: "document:somedoc#viewer@user:tom" } + ], + OptionalTransactionMetadata: { + "request_id": "12345" + } +} + +``` + +This allows callers to correlate write operations and the updates that come from the Watch API. + +[Transaction Metadata]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.WriteRelationshipsRequest diff --git a/content/spicedb/concepts/zanzibar.mdx b/content/spicedb/concepts/zanzibar.mdx new file mode 100644 index 0000000..b0d14ff --- /dev/null +++ b/content/spicedb/concepts/zanzibar.mdx @@ -0,0 +1,272 @@ +import YouTube from "react-youtube"; +import ObjectTypeDiagram from "@/public/images/object-type-regex.svg"; +import ObjectIDDiagram from "@/public/images/object-id-regex.svg"; +import { Callout } from "nextra/components"; + +# Google Zanzibar + +SpiceDB is based on Google Zanzibar, a revolutionary authorization system developed by Google to handle the massive scale and complexity of their services. + +You may recognize the system if you've ever shared access with another user to a Google product like Google Docs or Gmail. + +It's designed to provide consistent, secure, and reliable authorization decisions across Google's vast network of applications and users. + +A [research paper] publicly documenting the system was published at [2019 USENIX Annual Technical Conference][usenix]. You can check out our [annotated version], which describes the concepts behind its design and implementation. + +[research paper]: https://authzed.com/zanzibar +[usenix]: https://www.usenix.org/conference/atc19 +[annotated version]: https://authzed.com/zanzibar + +## History + +In the 2010s, a team at Google was formed to secure objects across SaaS products and internal systems. +Because a single object could be handled by multiple systems (e.g. the core product and a search system), making the problem one of distributed access control, properly handling end-user access controls required architecting a new system. + +In the summer of 2019, researchers at Google published a paper called "Zanzibar: Google's Consistent, Global Authorization system". +This paper documented the design and success of the project that went on to handle authorization logic across Google's product portfolio. + +Before landing on the name Zanzibar, the project was internally referred to as "Spice". +At Google, their mission was to ensure the "ACLs must flow", a reference to ["the spice must flow"][spice-must-flow]. +This theme was chosen because Lea Kissner, one of the co-creators, is a [Dune fan][lea-dune]. +In homage, AuthZed maintained a Dune-related naming scheme for their own projects. + +[lea-dune]: https://twitter.com/LeaKissner/status/1304457030044794880 +[spice-must-flow]: https://fictionhorizon.com/the-meaning-of-dunes-the-spice-must-flow-quote/ + +## Significance + +### Popularizing ReBAC + +**Re**lationship-**b**ased **A**ccess **C**ontrol (ReBAC) is a one of the paradigms for the design of authorization systems. +The core idea behind ReBAC is that the existence of a chain of relationships between a subject and a resource defines access. +This abstraction alone is able to model all other existing authorization paradigms including the very popular RBAC and ABAC designs. +The concept was originally described by Carrie Gates in a 2006 paper entitled [Access Control Requirements for Web 2.0 Security and Privacy][web2-paper] with Facebook cited as an early adopter of this paradigm. +However, it wouldn't be until the publication of the Zanzibar paper in 2019 that ReBAC would achieve popularity outside of applications that weren't already leveraging graph abstractions for their data. + +As [Broken Access Control][broken] now tops the OWASP Top 10, [ReBAC has become the recommended method for building correct authorization systems][owasp-rec]. + +For more information on ReBAC, see the [documentation for Relationships][rels]. + +[web2-paper]: https://www.researchgate.net/profile/Carrie-Gates-2/publication/240787391_Access_Control_Requirements_for_Web_20_Security_and_Privacy/links/540e6f670cf2d8daaacd4adf/Access-Control-Requirements-for-Web-20-Security-and-Privacy.pdf +[broken]: https://owasp.org/Top10/A01_2021-Broken_Access_Control/ +[owasp-rec]: https://cheatsheetseries.owasp.org/cheatsheets/Authorization_Cheat_Sheet.html#prefer-attribute-and-relationship-based-access-control-over-rbac +[rels]: ./relationships + +### New Enemy Problem + +The New Enemy Problem is a scenario where unauthorized access can occur when changes to permissions and the resources they protect are not updated together [consistently]. +SpiceDB solves this problem with configurable consistency and ZedTokens, its version of Zookies. + +The term "Zookies" was first introduced in the Zanzibar paper where solving this problem was a fundamental design goal: + +> ACL checks must respect the order in which users modify ACLs and object contents to avoid unexpected sharing behaviors. +> Specifically, our clients care about preventing the "new enemy" problem, which can arise when we fail to respect the ordering between ACL updates or when we apply old ACLs to new content. +> Consider these two examples: +> +> **Example A: Neglecting ACL update order** +> +> 1. Alice removes Bob from the ACL of a folder; +> 2. Alice then asks Charlie to move new documents to the folder, where document ACLs inherit from folder ACLs; +> 3. Bob should not be able to see the new documents, but may do so if the ACL check neglects the ordering between the two ACL changes. +> +> **Example B: Misapplying old ACL to new content** +> +> 1. Alice removes Bob from the ACL of a document; +> 2. Alice then asks Charlie to add new contents to the document; +> 3. Bob should not be able to see the new contents, but may do so if the ACL check is evaluated with a stale ACL from before Bob's removal. +> +> — [Zanzibar, 2.2 Consistency Model](https://authzed.com/zanzibar#2.2-consistency-model) + +To dig deeper on the New Enemy Problem and the greater topic of consistency, you can read the following: + +- [SpiceDB Consistency Documentation][consistently] +- [Enforcing Causal Ordering in Distributed Systems: The Importance of Permissions Checking](https://authzed.com/blog/new-enemies) + +[consistently]: ./consistency + +### Papers We Love Presentation + +On June 28th 2021, Zanzibar was presented to the [Papers We Love] New York City chapter: + +
+ + +[Papers We Love]: https://paperswelove.org + +## Differences with SpiceDB + +SpiceDB attempts to remain true to Zanzibar's design principles, but without any assumptions around Google's internal infrastructure and use cases. +As a result, many things in SpiceDB are more flexible to accommodate different kinds of users with different software stacks. +For example, [modeling complex user systems][model-users] is possible in SpiceDB, but in Zanzibar all users must be a uint64 identifier. + +Because SpiceDB is not forced on developers as company-wide requirement, the project also values developer experience and making the tooling pleasant to work with. +You can see this in our [Schema Language] and [Playground] which vastly improves the user experience of directly manipulating Protocol Buffers at Google. + +[model-users]: ../modeling/representing-users +[Schema Language]: ../concepts/schema +[Playground]: https://play.authzed.com +[z-diff]: ../concepts/zanzibar#differences-with-spicedb + + + The [Annotated Zanzibar paper] highlights the differences between SpiceDB and Zanzibar! + + [Annotated Zanzibar paper]: https://authzed.com/zanzibar/#annotations/spicedb + + + +### Schema Language + +The Zanzibar paper provides examples of Namespace Configs using the Protocol Buffers text-format. +Internally, Google has a plethora of Protocol Buffer tooling to aid developers in generating Namespace Configs. + +SpiceDB instead offers a [Schema Language][schema] that internally compiles into Namespace Configs. + +[schema]: ./schema + +### Distinguishing Relations from Permissions + +Zanzibar [does not disambiguate][disambiguate] between relations that define access and those that exist purely in the abstract. + +SpiceDB introduces new terms and syntax to differentiate relations into two concepts: Relations and Permissions. + +Permissions are best thought of as the "public API" being consumed by applications to check access. +Permissions are defined using set semantics referred to in Zanzibar parlance as +"computed usersets". + +Relations are purely abstract relationships between objects stored in SpiceDB. +They can be queried by the API, but we highly recommend only ever calling Permissions from the API because Permissions can be updated to compute access backwards compatibly. + +This disambiguation also allowed SpiceDB to drop the confusing `_this` keyword used in Zanzibar userset rewrites. + +[disambiguate]: https://authzed.com/zanzibar/#annotations/spicedb/relations-vs-permissions + +### Reverse Indices + +Both Zanzibar and [SpiceDB][spicedb-expand] implement a ["Reverse Index Expand" API][expand]. + +However, this API responds with a tree structure that can be awkward for applications to consume, especially when it's ideal to avoid co-mingling permissions logic and application code. + +As a result, SpiceDB supports additional APIs: the [LookupResources] and [LookupSubjects] APIs, which are designed to answer the following questions, respectively: + +- "What are all of the resources this subject can access?" +- "What are all of the subjects with access to this resource?" + +[LookupResources]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.LookupResources +[LookupSubjects]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.LookupSubjects + +These APIs make it easier for consumers, because they return a flattened list of results. + +[spicedb-expand]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.ExpandPermissionTree +[expand]: https://authzed.com/zanzibar/2DaJ2vLfht:0.Tk3KF4i94:4S + +### Datastores + +Zanzibar only supports Google's internal [Spanner] service for tuple-storage. + +SpiceDB supports a variety of datastores; including [Cloud Spanner]. + +You can learn more about datastores in the [Datastores documentation]. + +[Spanner]: https://static.googleusercontent.com/media/research.google.com/en//archive/spanner-osdi2012.pdf +[Cloud Spanner]: https://cloud.google.com/spanner/ +[Datastores documentation]: ./datastores + +### Consistency + +Zanzibar supports a [ContentChangeCheck API][ccc-api] and the ability to specify "at least as fresh" as a Zookie. + +SpiceDB simplifies this workflow by allowing API requests to specify their [consistency] behavior in addition to implementing ZedTokens, the analogue of Zanzibar's Zookies. + +[ccc-api]: https://zanzibar.tech/#annotations/spicedb/content-change-check +[Consistency]: consistency + +### Identifiers + +SpiceDB is a bit more flexible with the character-set allowed for Object IDs. + +Object Types follow the following Regular Expression: + + ``` + ^([a-z][a-z0-9_]{1,61}[a-z0-9]\/)*[a-z][a-z0-9_]{1,62}[a-z0-9]$ + ``` + + + +Object IDs follow the following Regular Expression: + + ``` + ^(([a-zA-Z0-9/_|\\\-=+]{1,})|\\*)$ + ``` + + + +### Users + +At Google, all users and services are registered with a service called GAIA (Google Accounts and ID Administration). +GAIA provides unique identifiers for every entity in the form of a 64-bit integer. +Zanzibar is designed with the assumption that any user can be represented using their GAIA ID. + +Because users are not as rigidly defined outside of Google, SpiceDB treats users just like any other object. +This allows SpiceDB to support more complex user systems and perform more powerful queries. + +A simple example is a SpiceDB schema modeling both users and API keys: + + ```zed + definition ApiKey {} + definition User { + relation keys: ApiKey + } + ``` + +You can now model relations and permissions with either type: + + ```zed {4} + definition Post { + relation viewer: User + ... + permission view = viewer + viewer->keys + } + ``` + +Now developers don't have to implement logic in every app that resolves API Keys because SpiceDB already knows how to resolve them. + +### Terminology + +| Zanzibar Term | SpiceDB Term | +| ---------------- | ------------------- | +| Tuple | Relationship | +| Namespace | Object Type | +| Namespace Config | Object Definition | +| Userset | Subject Reference | +| User | Subject Reference | +| Zookie | ZedToken | +| Tupleset | Relationship Set | +| Tupleset Filter | Relationship Filter | + +## FAQ + +### Is Zanzibar the same as ReBAC? + +While Zanzibar is closely associated with ReBAC (Relationship-Based Access Control), it's not exactly the same thing. +While Zanzibar is the authorization system designed by Google, ReBAC is an authorization model focused on relationships between objects to determine access. + +Zanzibar uses ReBAC as its underlying authorization model. +So, you could say that Zanzibar is a ReBAC system, but it's more than that. +It also encompasses the infrastructure, algorithms, and optimizations that allow it to operate at Google's immense scale. + +## Recommended Reading + +- [Annotated Zanzibar Paper](https://authzed.com/zanzibar) +- [Lea Kissner's Zanzibar Description](https://docs.google.com/document/d/1KbJ8Gc65mTkSQXFqBalbiCbGuQClTQDRmKCg1BExAN4/edit) + +### Related Technologies + +- [Spanner]: the datastore used by Zanzibar +- [CockroachDB]: an open-source database inspired by Spanner and used by SpiceDB +- [Slicer]: the dynamic sharding system used by Zanzibar to avoid hotspots +- [F1]: Google Ads backend cited for performance metrics in the Spanner paper + +[Spanner]: https://static.googleusercontent.com/media/research.google.com/en//archive/spanner-osdi2012.pdf +[CockroachDB]: https://dl.acm.org/doi/pdf/10.1145/3318464.3386134 +[Slicer]: https://www.usenix.org/system/files/conference/osdi16/osdi16-adya.pdf +[F1]: https://storage.googleapis.com/pub-tools-public-publication-data/pdf/41344.pdf diff --git a/content/spicedb/getting-started/_meta.ts b/content/spicedb/getting-started/_meta.ts new file mode 100644 index 0000000..bdde130 --- /dev/null +++ b/content/spicedb/getting-started/_meta.ts @@ -0,0 +1,12 @@ +import type { MetaRecord } from 'nextra' + +export default { + "discovering-spicedb": "What is SpiceDB?", + "first-steps": "First Steps", + install: "Installing SpiceDB", + "client-libraries": "Client Libraries", + "installing-zed": "Installing the CLI", + "coming-from": "Coming From", + "protecting-a-blog": "Example: Protecting a Blog Application", + faq: "FAQ", +} satisfies MetaRecord; diff --git a/content/spicedb/getting-started/client-libraries.mdx b/content/spicedb/getting-started/client-libraries.mdx new file mode 100644 index 0000000..eb804d6 --- /dev/null +++ b/content/spicedb/getting-started/client-libraries.mdx @@ -0,0 +1,48 @@ +# Official Client Libraries + +SpiceDB is primarily accessed by a [gRPC] API and thus client libraries can be generated for any programming language. + +AuthZed builds and maintains gRPC client libraries for the following languages: + +- [Go](https://github.com/authzed/authzed-go) +- [Node](https://github.com/authzed/authzed-node) +- [Python](https://github.com/authzed/authzed-py) +- [Ruby](https://github.com/authzed/authzed-rb) +- [Java](https://github.com/authzed/authzed-java) +- [Dotnet](https://github.com/authzed/authzed-dotnet) + +Because the above libraries are generated from protobuf definitions in our [API repo], +the primary documentation for the gRPC API is in the [buf documentation] for SpiceDB's services. +The gRPC client documentation associated with each host language will also be helpful for putting together invocations. +Additionally, there are `example` directories in the client libraries that provide example usages. + +## HTTP Clients + +SpiceDB exposes an HTTP API when run with the `--http-enabled` flag. +While Authzed doesn't officially maintain HTTP client libraries, there are [OpenAPI] docs available [here](../api/http-api) and served by a SpiceDB instance running the HTTP server. + +Try it out: + +```sh +docker run --rm -p 50051:50051 -p 8443:8443 authzed/spicedb serve --http-enabled --grpc-preshared-key foobar + +curl localhost:8443/openapi.json +``` + +The OpenAPI JSON can then be converted into a client using a tool like [openapi-ts] or [openapi-python-client]. + +## Other Clients + +AuthZed also develops [zed], a command-line client for interacting with the SpiceDB API. + +You can find more languages and integrations maintained by the community in the [Clients section] of the [Awesome SpiceDB] repository. + +[grpc]: https://grpc.io +[zed]: https://github.com/authzed/zed +[clients section]: https://github.com/authzed/awesome-spicedb#clients +[awesome spicedb]: https://github.com/authzed/awesome-spicedb +[api repo]: https://github.com/authzed/api +[buf documentation]: https://buf.build/authzed/api/docs/main:authzed.api.v1 +[openapi]: https://www.openapis.org/ +[openapi-ts]: https://github.com/hey-api/openapi-ts +[openapi-python-client]: https://github.com/openapi-generators/openapi-python-client diff --git a/content/spicedb/getting-started/coming-from/_meta.ts b/content/spicedb/getting-started/coming-from/_meta.ts new file mode 100644 index 0000000..096d85e --- /dev/null +++ b/content/spicedb/getting-started/coming-from/_meta.ts @@ -0,0 +1,6 @@ +import type { MetaRecord } from 'nextra' + +export default { + opa: "Open Policy Agent", + cancancan: "Ruby on Rails", +} satisfies MetaRecord; diff --git a/content/spicedb/getting-started/coming-from/cancancan.mdx b/content/spicedb/getting-started/coming-from/cancancan.mdx new file mode 100644 index 0000000..5a8834c --- /dev/null +++ b/content/spicedb/getting-started/coming-from/cancancan.mdx @@ -0,0 +1,65 @@ +import { Callout } from "nextra/components"; +import PSDLight from "@/public/images/ps-light.svg"; +import PSDDark from "@/public/images/ps-dark.svg"; +import CCCLight from "@/public/images/ps-cancancan-light.svg"; +import CCCDark from "@/public/images/ps-cancancan-dark.svg"; +import SpiceDBLight from "@/public/images/ps-spicedb-light.svg"; +import SpiceDBDark from "@/public/images/ps-spicedb-dark.svg"; + +# SpiceDB for Ruby on Rails CanCanCan users + +This document is designed to cover the conceptual differences between SpiceDB and the popular Ruby on Rails gem CanCanCan. + + + The focus of the content below is not intended to be a competitive analysis, + but rather a bridge to understand SpiceDB for existing Rails users. + + +## SpiceDB vs CanCanCan + +Every complete permissions system is made up of three major components: _models_, _data_, and an _engine_. + +While comparing SpiceDB and CanCanCan is akin to comparing apples and oranges because they are fundamentally two different approaches, both can be analyzed through the lens of these three components to understand the design of each. + +A quick recap on the components and their purpose: + +- **Models** define the logic and rules governing actions in the system. +- **Data** provides the context for action itself (who's doing it, the object of the action, and more) +- An **engine** interprets models and data in order to make an access control decision + +
+ + +
+ +CanCanCan is a _library_ (gem) designed to help model authorization in Ruby on Rails projects. +The fundamental idea behind most authorization libraries is that access control can be modeled by reusing as much as possible from within your existing web framework. +For cancancan, developers define their models from within an `Ability` class. +Engine logic is implemented within the library such that developers can simply load the `Ability` class within their `Controller` classes to enforce their models. +Data outside of the request context is left open ended, but most often is fetched from the primary database using ActiveRecord. + +
+ + +
+ +SpiceDB is a class of technology called a _database_. +The fundamental idea behind databases is that they store and index data efficiently so that it can be effectively queried. +While many databases are general-purpose, SpiceDB is optimized specifically for storing and querying authorization data. +SpiceDB provides all three components of a permissions systems: schema provides the models, the data is stored within the database itself, and the query engine interprets the two in order to make access control decisions. + +
+ + +
+ +## When to use SpiceDB instead of Cancancan + +SpiceDB provides a centralized system and, while doing so, enforces opinions about how authorization should be done. +Adopting SpiceDB can be a powerful tool for enabling a shift towards centralizing authorization into one service that can be operated by a specialized team. +This design is a perfect fit for when there are multiple software services in an organization that needs to share data in order to secure access to their resources. + +## When to use Cancancan instead of SpiceDB + +Cancancan, and authorization libraries in general, excels when a monolithic or modular monolith architecture is the best fit for your software stack. +For example, there are framework utilities and reusable app libraries that your authorization models can leverage. diff --git a/content/spicedb/getting-started/coming-from/opa.mdx b/content/spicedb/getting-started/coming-from/opa.mdx new file mode 100644 index 0000000..70fe2fd --- /dev/null +++ b/content/spicedb/getting-started/coming-from/opa.mdx @@ -0,0 +1,70 @@ +import { Callout } from "nextra/components"; +import PSDLight from "@/public/images/ps-light.svg"; +import PSDDark from "@/public/images/ps-dark.svg"; +import OPALight from "@/public/images/ps-opa-light.svg"; +import OPADark from "@/public/images/ps-opa-dark.svg"; +import SpiceDBLight from "@/public/images/ps-spicedb-light.svg"; +import SpiceDBDark from "@/public/images/ps-spicedb-dark.svg"; + +# SpiceDB for Open Policy Agent (OPA) users + +This document is designed to cover the conceptual differences between SpiceDB and Open Policy Agent (OPA). + + + The focus of the content below is not intended to be a competitive analysis, + but rather a bridge to understand SpiceDB for existing OPA users. + + +## SpiceDB vs OPA + +Every complete permissions system is made up of three major components: _models_, _data_, and an _engine_. + +While comparing SpiceDB and OPA is akin to comparing apples and oranges because they are fundamentally two different approaches, both can be analyzed through the lens of these three components to understand the design of each. + +A quick recap on the components and their purpose: + +- **Models** define the logic and rules governing actions in the system. +- **Data** provides the context for action itself (who's doing it, the object of the action, and more) +- An **engine** interprets models and data in order to make an access control decision + +
+ + +
+ +Open Policy Agent is a class of technology called a _policy engine_. +The fundamental idea behind policy engines is that access control decisions are the result of executing a computer program. + +Referring back to the three components of a complete permissions system, policy engines typically implement only two of the three components: models and an engine. +Developers write programs, called policies, to provide the models of their system. +For OPA, their models are written in a language called Rego that was inspired by Datalog. +An engine, OPA itself, then executes these policies using any data usually provided at runtime with the request. +For data that is not provided with a request, there exist various configurations, strategies, and software projects for providing data into OPA-based systems; this aspect of OPA is open-ended. + +
+ + +
+ +SpiceDB is a class of technology called a _database_. +The fundamental idea behind databases is that they store and index data efficiently so that it can be effectively queried. +While many databases are general-purpose, SpiceDB is optimized specifically for storing and querying authorization data. +SpiceDB provides all three components of a permissions systems: schema provides the models, the data is stored within the database itself, and the query engine interprets the two in order to make access control decisions. + +
+ + +
+ +## When to use SpiceDB instead of OPA + +SpiceDB provides a comprehensive system and, while doing so, enforces opinions about how authorization should be done. +It encourages developers to consider data consistency upfront as they integrate applications and face other, often silent, failure domains such as the [New Enemy Problem]. +Adopting SpiceDB can be a powerful tool for enabling a shift towards centralizing authorization into one service that can be operated by a specialized team. + +[New Enemy Problem]: ../../concepts/zanzibar#new-enemy-problem + +## When to use OPA instead of SpiceDB + +OPA excels in scenarios where it can be easily embedded or ran alongside an existing workload. +Because it is a fairly open-ended how one deals with data in OPA deployments, it shines best as a solution when access control decisions don't require much data. diff --git a/content/spicedb/getting-started/discovering-spicedb.mdx b/content/spicedb/getting-started/discovering-spicedb.mdx new file mode 100644 index 0000000..feead7a --- /dev/null +++ b/content/spicedb/getting-started/discovering-spicedb.mdx @@ -0,0 +1,98 @@ +import { Cards } from "nextra/components"; +import { + faQuestion, + faHand, + faDollarSign, +} from "@fortawesome/free-solid-svg-icons"; +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; + +# SpiceDB Documentation + +Welcome to the official documentation for the SpiceDB ecosystem. + + + } + title="New? Follow our first steps guide" + href="first-steps" + /> + } + title="Got questions? Our FAQ has answers" + href="faq" + /> + } + title="Paid products? That's the AuthZed docs" + href="../../authzed/guides/picking-a-product" + /> + + +## What is SpiceDB? + +SpiceDB is an open-source, [Google Zanzibar]-inspired database system for real-time, security-critical application permissions. + +Developers create and apply a [schema] that models their application's resources and permissions. +From their applications, [client libraries] are used to insert [relationships] or check permissions in their applications. + +Building modern authorization from scratch is non-trivial and requires years of development from domain experts. +Until SpiceDB, the only developers with access to these workflows were employed by massive tech companies that could invest in building mature, but proprietary solutions. +Now we have a community organized around sharing this technology so the entire industry can benefit. + +In some scenarios, SpiceDB can be challenging to operate because it is a critical, low-latency, distributed system. +For folks interested in managed SpiceDB services and enterprise functionality, there are [AuthZed's products][authzed-products]. + +[Google Zanzibar]: https://authzed.com/blog/what-is-zanzibar/ +[schema]: https://authzed.com/docs/guides/schema +[client libraries]: https://github.com/authzed/awesome-spicedb#clients +[relationships]: ../concepts/relationships +[authzed-products]: ../../authzed/guides/picking-a-product + +### A brief SpiceDB history lesson + +In August 2020, the founders of AuthZed left [Red Hat], who had acquired their previous company [CoreOS]. +In the following month, they would write the first API-complete implementation of Zanzibar; project Arrakis was written in lazily-evaluated, type-annotated Python. +In September, Arrakis was demoed as a part of their [YCombinator] application. +In March 2021, Arrakis was rewritten in Go, a project code named Caladan. +This rewrite would eventually be open-sourced in September 2021 under the name [SpiceDB]. + +You can also read the [history of Google's Zanzibar project][zanzibar-history], the spiritual predecessor and inspiration for SpiceDB. + +[Red Hat]: https://redhat.com +[CoreOS]: https://www.redhat.com/en/technologies/cloud-computing/openshift/what-was-coreos +[YCombinator]: https://www.ycombinator.com/companies/authzed +[SpiceDB]: https://authzed.com/blog/spicedb-is-open-source-zanzibar +[zanzibar-history]: ../concepts/zanzibar#history + +### SpiceDB Features + +Features that distinguish SpiceDB from other systems include: + +- Expressive [gRPC] and [HTTP/JSON] APIs for checking permissions, listing access, and powering devtools +- A distributed, parallel graph engine faithful to the architecture described in [Google's Zanzibar paper] +- A flexible consistency model configurable [per request] that includes resistance to the [New Enemy Problem] +- An expressive [schema language] with a [playground] and CI/CD integrations for [validation] and [integration testing] +- A pluggable [storage system] supporting [in-memory], [Spanner], [CockroachDB], [PostgreSQL] and [MySQL] +- Deep observability with [Prometheus] metrics, [pprof] profiles, structured logging, and [OpenTelemetry] tracing + +[gRPC]: https://buf.build/authzed/api/docs/main:authzed.api.v1 +[HTTP/JSON]: https://app.swaggerhub.com/apis-docs/authzed/authzed/1.0 +[per request]: https://docs.authzed.com/reference/api-consistency +[New Enemy Problem]: https://authzed.com/blog/new-enemies/ +[schema language]: https://docs.authzed.com/guides/schema +[playground]: https://play.authzed.com +[validation]: https://github.com/authzed/action-spicedb-validate +[integration testing]: https://github.com/authzed/action-spicedb +[storage system]: https://authzed.com/docs/spicedb/selecting-a-datastore +[in-memory]: https://github.com/hashicorp/go-memdb +[PostgreSQL]: https://www.postgresql.org +[Spanner]: https://cloud.google.com/spanner +[CockroachDB]: https://github.com/cockroachdb/cockroach +[MySQL]: https://www.mysql.com +[Prometheus]: https://prometheus.io +[pprof]: https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/ +[OpenTelemetry]: https://opentelemetry.io +[Google's Zanzibar paper]: https://authzed.com/zanzibar diff --git a/content/spicedb/getting-started/faq.mdx b/content/spicedb/getting-started/faq.mdx new file mode 100644 index 0000000..eb162f3 --- /dev/null +++ b/content/spicedb/getting-started/faq.mdx @@ -0,0 +1,58 @@ +# Frequently-asked Questions + +## Is SpiceDB Open Source? + +SpiceDB is developed as an [Apache 2.0-licensed][apache] open-source, community-first effort. + +Large contributions must follow a proposal and feedback process regardless of whether the authors are maintainers, AuthZed employees, or brand new to the community. + +Other AuthZed open source projects are typically licensed [Apache 2.0][apache] unless they are a fork of another codebase. +Example code is [MIT-licensed][mit] so that they can be modified and adopted into any codebase. + +Not all code produced at AuthZed is open source. +There are two conditions under which code is kept proprietary: + +- Functionality is minimally applicable to the community and is directly tied to enterprise environments +- Functionality is tied to AuthZed's infrastructure and is not widely applicable to all deployments + +[apache]: https://www.tldrlegal.com/license/apache-license-2-0-apache-2-0 +[mit]: https://www.tldrlegal.com/license/mit-license + +## Does SpiceDB secure IT infrastructure? + +SpiceDB is a database designed to be integrated into applications. + +There are some organizations with homegrown IT use-cases that use SpiceDB. +However, for most IT use cases, this is probably more low-level than what you need. + +We recommend looking into tools designed around specific IT workflows such as auditing ([Orca], [PrismaCloud]), governance, access management ([Indent], [ConductorOne]). + +[Orca]: https://orca.security/platform/cloud-security-posture-management-cspm/ +[PrismaCloud]: https://www.paloaltonetworks.com/prisma/cloud +[Indent]: https://indent.com +[ConductorOne]: https://conductorone.com + +## Is SpiceDB a policy engine? + +SpiceDB is not a policy engine. + +SpiceDB was inspired by Zanzibar, which popularized the concept of Relationship-based access control (ReBAC). +ReBAC systems offer correctness, performance, and scaling guarantees that are not possible in systems designed purely around policy. +Notably, policy engines cannot implement [Reverse Indices]. + +However, there are some scenarios where ReBAC systems can benefit from dynamic enforcement. +For these scenarios, SpiceDB supports [Caveats] as a light-weight form of policy that avoids pitfalls present in many other systems. + +[Reverse Indices]: ../concepts/zanzibar/#reverse-indices +[caveats]: ../concepts/caveats + +## How can I get involved with SpiceDB? + +The best first step is to join [Discord]. + +Discord is a great place to chat with other community members and the maintainers of the software. + +If you're looking to contribute code, you can read [CONTRIBUTING.md] in our open source projects for details how to contribute, good first issues, and common development workflows. + +[Discord]: https://authzed.com/discord +[CONTRIBUTING.md]: https://github.com/authzed/spicedb/blob/main/CONTRIBUTING.md diff --git a/content/spicedb/getting-started/first-steps.mdx b/content/spicedb/getting-started/first-steps.mdx new file mode 100644 index 0000000..a1aa303 --- /dev/null +++ b/content/spicedb/getting-started/first-steps.mdx @@ -0,0 +1,98 @@ +import { Cards, Steps } from "nextra/components"; +import YouTube from "react-youtube"; +import { + faTerminal, + faServer, + faQuestion, + faHand, + faDollarSign, + faPhone, +} from "@fortawesome/free-solid-svg-icons"; +import { faDocker, faAws } from "@fortawesome/free-brands-svg-icons"; +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; + +import { InlinePlayground } from "@/components/playground"; + +# First steps + + + + ### Learn the concepts + + We've documented the concepts SpiceDB users should understand: + + + + + + + + After these, we recommend these concepts for running SpiceDB: + + + + + + + + + + Finally, some more advanced concepts are still fundamental: + + + + + + + ### Dive into some videos + + SpiceDB developers and community members have recorded videos explaining concepts, modeling familiar applications, and deep-diving into the tech powering everything! + +
+ + + + + ### Experiment in the Playground + + You can experiment with and share schema and data snippets on the [Playground][pg]. + + Here's an easy example to toy with: + +
+ + [pg]: https://play.authzed.com + + ### Install the SpiceDB toolchain + + You can easily import the schema and data from the playground into a real SpiceDB instance using `zed import`. + + SpiceDB and Zed run on Linux, macOS, and Windows on both AMD64 and ARM64 architectures. + + Follow the instructions below to install to your development machine: + + + } title="Install the SpiceDB server binary" href="/spicedb/getting-started/installing-spicedb" /> + } title="Install the SpiceDB CLI tool: Zed" href="/spicedb/getting-started/installing-zed" /> + + + ### Take SpiceDB to Production + + Once you're ready to take things into production, you can reference our guides or explore a managed solution with AuthZed. + + + } title="Deploy the Operator on Kubernetes" href="/spicedb/ops/deploying-spicedb-operator" /> + } title="Install a SpiceDB cluster on Amazon EKS" href="/spicedb/ops/eks" /> + } title="Schedule a call with AuthZed" href="https://authzed.com/call?utm_source=docs_discovering_spicedb" /> + + + ### Join the SpiceDB Discord + + Have questions? Ask us! + +
+ + + [discord]: https://discord.gg/spicedb + +
diff --git a/content/spicedb/getting-started/install/_meta.ts b/content/spicedb/getting-started/install/_meta.ts new file mode 100644 index 0000000..1f27252 --- /dev/null +++ b/content/spicedb/getting-started/install/_meta.ts @@ -0,0 +1,10 @@ +import type { MetaRecord } from 'nextra' + +export default { + macos: "macOS", + docker: "Docker", + kubernetes: "Kubernetes", + debian: "Ubuntu/Debian", + rhel: "RHEL/CentOS", + windows: "Windows", +} satisfies MetaRecord; diff --git a/content/spicedb/getting-started/install/debian.mdx b/content/spicedb/getting-started/install/debian.mdx new file mode 100644 index 0000000..5505564 --- /dev/null +++ b/content/spicedb/getting-started/install/debian.mdx @@ -0,0 +1,73 @@ +import { Callout } from "nextra/components"; + +# Installing SpiceDB on Ubuntu or Debian + +This document outlines how to install SpiceDB for systems running [Debian-like Linux distributions][debianlike]. + +Every release of SpiceDB publishes `.deb` packages, [snap] packages, and tarballs for AMD64 and ARM64 Linux. + +Looking for `.rpm` packages? +Visit the doc on [Installing SpiceDB on RHEL/CentOS][rhel] + +[debianlike]: https://en.wikipedia.org/wiki/List_of_Linux_distributions#Debian-based +[rhel]: ./rhel +[snap]: https://snapcraft.io/spicedb + +## Installing SpiceDB using apt-get + +First, download the public signing key for the repository: + +```sh +# In releases older than Debian 12 and Ubuntu 22.04, the folder `/etc/apt/keyrings` does not exist by default, and it should be created before the curl command. +# sudo mkdir -p -m 755 /etc/apt/keyrings + +curl -sS https://pkg.authzed.com/apt/gpg.key | sudo gpg --dearmor --yes -o /etc/apt/keyrings/authzed.gpg +``` + +Then add the list file for the repository: + +```sh +echo "deb [signed-by=/etc/apt/keyrings/authzed.gpg] https://pkg.authzed.com/apt/ * *" | sudo tee /etc/apt/sources.list.d/authzed.list +sudo chmod 644 /etc/apt/sources.list.d/authzed.list # helps tools such as command-not-found to work correctly +``` + +Alternatively, if you want to use the new `deb822`-style `authzed.sources` format, put the following in `/etc/apt/sources.list.d/authzed.sources`: + +``` +Types: deb +URIs: https://pkg.authzed.com/apt/ +Suites: * +Components: * +Signed-By: /etc/apt/keyrings/authzed.gpg +``` + +Once you've defined the sources and updated your apt cache, it can be installed just like any other package: + +```sh +sudo apt update +sudo apt install -y spicedb +``` + +## Installing SpiceDB using snap + +SpiceDB is available in the [Snap Store][snap] making it a single command to install: + +```sh +sudo snap install spicedb +``` + +## Manually installing SpiceDB binary for Linux + +Manual installations of SpiceDB can use the following command to download the latest release for their platform and architecture: + +```sh +curl https://api.github.com/repos/authzed/spicedb/releases | \ +jq --arg platform $(uname | tr '[:upper:]' '[:lower:]') --arg arch $(uname -m) '.[0].assets.[] | select (.name | contains($platform+"_"+$arch)) | .browser_download_url' -r | \ +xargs curl -LO +``` + +Afterwards, it is up to the user to extract the archive and decide where to place its contents on their systems. + +We recommend following the [XDG Base Directory Specification][xdg] if you're not trying to install SpiceDB system-wide. + +[xdg]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html diff --git a/content/spicedb/getting-started/install/docker.mdx b/content/spicedb/getting-started/install/docker.mdx new file mode 100644 index 0000000..5c30215 --- /dev/null +++ b/content/spicedb/getting-started/install/docker.mdx @@ -0,0 +1,49 @@ +import { Callout } from "nextra/components"; + +# Installing SpiceDB with Docker + +This document outlines how to install SpiceDB for systems running Docker (or similar container runtimes). + +Every release of SpiceDB publishes AMD64 and ARM64 images to multiple public registries: + +- [authzed/spicedb](https://hub.docker.com/r/authzed/spicedb) +- [ghcr.io/authzed/spicedb](https://github.com/authzed/spicedb/pkgs/container/spicedb) +- [quay.io/authzed/spicedb](https://quay.io/authzed/spicedb) + +While the SpiceDB image on Docker Hub has millions of downloads, for production usage we recommend pushing a copy of the SpiceDB image to your own registry to avoid any outages or rate-limits impacting your deployment. + +## Pulling the latest SpiceDB Docker image + +You can install the latest version of SpiceDB by using the standard pull command with your registry of choice. + +```sh +docker pull authzed/spicedb:latest +``` + + + **Warning:** + Production deployments should never use the `latest` tag. + + Instead, opt for a tag referencing a specific release. + For automated upgrades consider deploying the [SpiceDB Operator]. + +[SpiceDB Operator]: /spicedb/ops/operator + + + +## Pulling the latest SpiceDB Debug image + +By default, SpiceDB images are based on [Chainguard Images][cgi] in order to remain minimal and secure. + +However, this can complicate debugging because there are no tools within the image if you need to debug. + +If you want to execute a user session into a running SpiceDB container and install packages for debugging, you can use one of our debug images: + +```sh +docker pull authzed/spicedb:latest-debug +``` + +Every release of SpiceDB has a corresponding debug image. +Add `-debug` any release tag to pull it. + +[cgi]: https://github.com/chainguard-images/images diff --git a/content/spicedb/getting-started/install/kubernetes.mdx b/content/spicedb/getting-started/install/kubernetes.mdx new file mode 100644 index 0000000..3955d76 --- /dev/null +++ b/content/spicedb/getting-started/install/kubernetes.mdx @@ -0,0 +1,90 @@ +import { Callout } from "nextra/components"; + +# Installing SpiceDB on Kubernetes + +This document outlines how to install SpiceDB for systems running Kubernetes. + +SpiceDB runs great in many environments, but our recommendation is to use Kubernetes in production deployments. +The SpiceDB developers have deep experience with Kubernetes and can offer the best support in this environment. +Additionally, SpiceDB has some additional logic for things such as peer discovery that make the experience a bit more seamless when using Kubernetes. + + + **Did You Know?** + The team behind SpiceDB helped pioneer the container and Kubernetes ecosystem. + +SpiceDB was born out of frustration by folks from [CoreOS] and [OpenShift]. + +[CoreOS]: https://en.wikipedia.org/wiki/Container_Linux +[OpenShift]: https://en.wikipedia.org/wiki/OpenShift + + + +## Installing SpiceDB using the SpiceDB Operator + +The SpiceDB Operator is the best way to deploy SpiceDB in production. + +To install the latest version of the SpiceDB Operator you can run the following command: + +```sh +kubectl apply --server-side -f https://github.com/authzed/spicedb-operator/releases/latest/download/bundle.yaml +``` + +After installation, you can create an example deployment of SpiceDB by applying a SpiceDBCluster Custom Resource: + +```sh +kubectl apply --server-side -f - < + **Warning:** There is no officially supported Helm Chart for SpiceDB; use this + at your own risk. + + +There is a [community maintained Chart][chart] by the folks over at [Bushel]. +As per their instructions, you can install it like this: + +```sh +helm repo add spicedb-operator-chart https://bushelpowered.github.io/spicedb-operator-chart/ +helm repo update +helm repo upgrade --install ... $RELEASE spicedb-operator-chart/spicedb-operator +``` + +[chart]: https://github.com/bushelpowered/spicedb-operator-chart +[Bushel]: https://bushelpowered.com diff --git a/content/spicedb/getting-started/install/macos.mdx b/content/spicedb/getting-started/install/macos.mdx new file mode 100644 index 0000000..168db3d --- /dev/null +++ b/content/spicedb/getting-started/install/macos.mdx @@ -0,0 +1,39 @@ +# Installing SpiceDB on macOS + +This document outlines how to install SpiceDB for systems running Apple's macOS. + +Every release of SpiceDB publishes for both Intel (AMD64) and M-series (ARM64) versions of macOS. + +## Installing SpiceDB using Homebrew + +The quickest way to get started with SpiceDB on macOS is to use Homebrew. +This will install both zed, the command-line tool, and the SpiceDB server binary. + +```sh +brew install authzed/tap/spicedb authzed/tap/zed +``` + +## Updating SpiceDB using Homebrew + +SpiceDB will log a warning if you are running an out-of-date version. +To ensure that you are using the latest stable release, you can run the following command to upgrade an existing installation of SpiceDB: + +```sh +brew upgrade authzed/tap/spicedb +``` + +## Manually installing SpiceDB binary for macOS + +Manual installations of SpiceDB for macOS can use the following command to download the latest release for their platform and architecture: + +```sh +curl https://api.github.com/repos/authzed/spicedb/releases | \ +jq --arg platform $(uname | tr '[:upper:]' '[:lower:]') --arg arch $(uname -m) '.[0].assets.[] | select (.name | contains($platform+"_"+$arch)) | .browser_download_url' -r | \ +xargs curl -LO +``` + +Afterwards, it is up to the user to extract the archive and decide where to place its contents on their systems. + +We recommend following the [XDG Base Directory Specification][xdg] if you're not trying to install SpiceDB system-wide. + +[xdg]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html diff --git a/content/spicedb/getting-started/install/rhel.mdx b/content/spicedb/getting-started/install/rhel.mdx new file mode 100644 index 0000000..52cd1f0 --- /dev/null +++ b/content/spicedb/getting-started/install/rhel.mdx @@ -0,0 +1,47 @@ +# Installing SpiceDB on RHEL or CentOS + +This document outlines how to install SpiceDB for systems running [RPM-based Linux distributions][rpm]. + +Every release of SpiceDB publishes `.rpm` packages and tarballs for AMD64 and ARM64 Linux. + +Looking for `.deb` packages? +Visit the doc on [Installing SpiceDB on Ubuntu/Debian][deb] + +[rpm]: https://en.wikipedia.org/wiki/List_of_Linux_distributions#RPM-based +[deb]: ./debian + +## Installing SpiceDB using dnf + +Before installing SpiceDB, you must first add the source for official SpiceDB RPM builds: + +```sh +sudo cat << EOF >> /etc/yum.repos.d/authzed.repo +[authzed] +name=AuthZed Fury Repository +baseurl=https://pkg.authzed.com/yum/ +enabled=1 +gpgcheck=0 +EOF +``` + +You can now install SpiceDB and zed, the official command-line tool as normal: + +```sh +sudo dnf install -y spicedb zed +``` + +## Manually installing SpiceDB binary for Linux + +Manual installations of SpiceDB for macOS can use the following command to download the latest release for their platform and architecture: + +```sh +curl https://api.github.com/repos/authzed/spicedb/releases | \ +jq --arg platform $(uname | tr '[:upper:]' '[:lower:]') --arg arch $(uname -m) '.[0].assets.[] | select (.name | contains($platform+"_"+$arch)) | .browser_download_url' -r | \ +xargs curl -LO +``` + +Afterwards, it is up to the user to extract the archive and decide where to place its contents on their systems. + +We recommend following the [XDG Base Directory Specification][xdg] if you're not trying to install SpiceDB system-wide. + +[xdg]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html diff --git a/content/spicedb/getting-started/install/windows.mdx b/content/spicedb/getting-started/install/windows.mdx new file mode 100644 index 0000000..f9ba026 --- /dev/null +++ b/content/spicedb/getting-started/install/windows.mdx @@ -0,0 +1,21 @@ +# Installing SpiceDB on Windows + +This document outlines how to install SpiceDB for systems running on Windows. + +Every release of SpiceDB publishes archives containing executables for AMD64 and ARM64 Windows. + +## Installing SpiceDB using Chocolatey + +The quickest way to get started with SpiceDB on Windows is to use [Chocolatey]: + +```powershell +choco install spicedb +``` + +[Chocolatey]: https://push.chocolatey.org + +## Manually installing SpiceDB binary for Windows + +You can download and extract the latest executable for AMD64 and ARM64 Windows from the [releases page]. + +[releases page]: https://github.com/authzed/spicedb/releases/latest diff --git a/content/spicedb/getting-started/installing-zed.mdx b/content/spicedb/getting-started/installing-zed.mdx new file mode 100644 index 0000000..de1fe81 --- /dev/null +++ b/content/spicedb/getting-started/installing-zed.mdx @@ -0,0 +1,1513 @@ +import { Callout } from "nextra/components"; + +# Installing Zed + +[Zed](https://github.com/authzed/zed) is the CLI used to interact with SpiceDB. + +It is built as a standalone executable file which simplifies installation, but one should prefer one of the recommended installation methods detailed below. + +## Debian packages + +[Debian-based Linux] users can install SpiceDB packages by adding an additional apt source. + +First, download the public signing key for the repository: + +```sh +# In releases older than Debian 12 and Ubuntu 22.04, the folder `/etc/apt/keyrings` does not exist by default, and it should be created before the curl command. +# sudo mkdir -p -m 755 /etc/apt/keyrings + +curl -sS https://pkg.authzed.com/apt/gpg.key | sudo gpg --dearmor --yes -o /etc/apt/keyrings/authzed.gpg +``` + +Then add the list file for the repository: + +```sh +echo "deb [signed-by=/etc/apt/keyrings/authzed.gpg] https://pkg.authzed.com/apt/ * *" | sudo tee /etc/apt/sources.list.d/authzed.list +sudo chmod 644 /etc/apt/sources.list.d/authzed.list # helps tools such as command-not-found to work correctly +``` + +Alternatively, if you want to use the new `deb822`-style `authzed.sources` format, put the following in `/etc/apt/sources.list.d/authzed.sources`: + +```sh +Types: deb +URIs: https://pkg.authzed.com/apt/ +Suites: * +Components: * +Signed-By: /etc/apt/keyrings/authzed.gpg +``` + +Once you've defined the sources and updated your apt cache, it can be installed just like any other package: + +```sh +sudo apt update +sudo apt install -y zed +``` + +[Debian-based Linux]: https://en.wikipedia.org/wiki/List_of_Linux_distributions#Debian-based + +## RPM packages + +[RPM-based Linux] users can install packages by adding a new yum repository: + +```sh +sudo cat << EOF >> /etc/yum.repos.d/Authzed-Fury.repo +[authzed-fury] +name=AuthZed Fury Repository +baseurl=https://pkg.authzed.com/yum/ +enabled=1 +gpgcheck=0 +EOF +``` + +Install as usual: + +```sh +sudo dnf install -y zed +``` + +[RPM-based Linux]: https://en.wikipedia.org/wiki/List_of_Linux_distributions#RPM-based + +## Homebrew (macOS) + +macOS users can install packages by adding a [Homebrew tap]: + +```sh +brew install authzed/tap/zed +``` + +[Homebrew tap]: https://docs.brew.sh/Taps + +### Other methods + +#### Docker + +Container images are available for AMD64 and ARM64 architectures on the following registries: + +- [authzed/zed](https://hub.docker.com/r/authzed/zed) +- [ghcr.io/authzed/zed](https://github.com/authzed/zed/pkgs/container/zed) +- [quay.io/authzed/zed](https://quay.io/authzed/zed) + +You can pull down the latest stable release: + +```sh +docker pull authzed/zed +``` + +Afterwards, you can run it with `docker run`: + +```sh +docker run --rm authzed/zed version +``` + +#### Downloading the binary + +Visit the GitHub release page for the [latest release](https://github.com/authzed/zed/releases/latest). +Scroll down to the `Assets` section and download the appropriate artifact. + +#### Source + +Clone the GitHub repository: + +```sh +git clone git@github.com:authzed/zed.git +``` + +Enter the directory and build the binary using mage: + +```sh +cd zed +go build ./cmd/zed +``` + +You can find more commands for tasks such as testing, linting in the repository's [CONTRIBUTING.md]. + +[CONTRIBUTING.md]: https://github.com/authzed/zed/blob/main/CONTRIBUTING.md + +## Reference: `zed` + +A command-line client for managing SpiceDB clusters. + +### Examples + +``` + +zed context list +zed context set dev localhost:80 testpresharedkey --insecure +zed context set prod grpc.authzed.com:443 tc_zed_my_laptop_deadbeefdeadbeefdeadbeefdeadbeef +zed context use dev +zed permission check --explain document:firstdoc writer user:emilia + +``` + +### Options + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +### Children commands + +- [zed backup](#reference-zed-backup) - Create, restore, and inspect permissions system backups +- [zed context](#reference-zed-context) - Manage configurations for connecting to SpiceDB deployments +- [zed import](#reference-zed-import) - Imports schema and relationships from a file or url +- [zed mcp](#reference-zed-mcp) - MCP (Model Context Protocol) server commands +- [zed permission](#reference-zed-permission) - Query the permissions in a permissions system +- [zed relationship](#reference-zed-relationship) - Query and mutate the relationships in a permissions system +- [zed schema](#reference-zed-schema) - Manage schema for a permissions system +- [zed use](#reference-zed-use) - Alias for `zed context use` +- [zed validate](#reference-zed-validate) - Validates the given validation file (.yaml, .zaml) or schema file (.zed) +- [zed version](#reference-zed-version) - Display zed and SpiceDB version information + +## Reference: `zed backup` + +Create, restore, and inspect permissions system backups + +``` +zed backup [flags] +``` + +### Options + +``` + --page-limit uint32 defines the number of relationships to be read by requested page during backup + --prefix-filter string include only schema and relationships with a given prefix + --rewrite-legacy potentially modify the schema to exclude legacy/broken syntax +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +### Children commands + +- [zed backup create](#reference-zed-backup-create) - Backup a permission system to a file +- [zed backup parse-relationships](#reference-zed-backup-parse-relationships) - Extract the relationships from a backup file +- [zed backup parse-revision](#reference-zed-backup-parse-revision) - Extract the revision from a backup file +- [zed backup parse-schema](#reference-zed-backup-parse-schema) - Extract the schema from a backup file +- [zed backup redact](#reference-zed-backup-redact) - Redact a backup file to remove sensitive information +- [zed backup restore](#reference-zed-backup-restore) - Restore a permission system from a file + +## Reference: `zed backup create` + +Backup a permission system to a file + +``` +zed backup create [flags] +``` + +### Options + +``` + --page-limit uint32 defines the number of relationships to be read by requested page during backup + --prefix-filter string include only schema and relationships with a given prefix + --rewrite-legacy potentially modify the schema to exclude legacy/broken syntax +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed backup parse-relationships` + +Extract the relationships from a backup file + +``` +zed backup parse-relationships [flags] +``` + +### Options + +``` + --prefix-filter string Include only relationships with a given prefix +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed backup parse-revision` + +Extract the revision from a backup file + +``` +zed backup parse-revision +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed backup parse-schema` + +Extract the schema from a backup file + +``` +zed backup parse-schema [flags] +``` + +### Options + +``` + --prefix-filter string include only schema and relationships with a given prefix + --rewrite-legacy potentially modify the schema to exclude legacy/broken syntax +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed backup redact` + +Redact a backup file to remove sensitive information + +``` +zed backup redact [flags] +``` + +### Options + +``` + --print-redacted-object-ids prints the redacted object IDs + --redact-definitions redact definitions (default true) + --redact-object-ids redact object IDs (default true) + --redact-relations redact relations (default true) +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed backup restore` + +Restore a permission system from a file + +``` +zed backup restore [flags] +``` + +### Options + +``` + --batch-size uint restore relationship write batch size (default 1000) + --batches-per-transaction uint number of batches per transaction (default 10) + --conflict-strategy string strategy used when a conflicting relationship is found. Possible values: fail, skip, touch (default "fail") + --disable-retries retries when an errors is determined to be retryable (e.g. serialization errors) + --prefix-filter string include only schema and relationships with a given prefix + --request-timeout duration timeout for each request performed during restore (default 30s) + --rewrite-legacy potentially modify the schema to exclude legacy/broken syntax +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed context` + +Manage configurations for connecting to SpiceDB deployments + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +### Children commands + +- [zed context list](#reference-zed-context-list) - Lists all available contexts +- [zed context remove](#reference-zed-context-remove) - Removes a context +- [zed context set](#reference-zed-context-set) - Creates or overwrite a context +- [zed context use](#reference-zed-context-use) - Sets a context as the current context + +## Reference: `zed context list` + +Lists all available contexts + +``` +zed context list [flags] +``` + +### Options + +``` + --reveal-tokens display secrets in results +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed context remove` + +Removes a context + +``` +zed context remove +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed context set` + +Creates or overwrite a context + +``` +zed context set +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed context use` + +Sets a context as the current context + +``` +zed context use +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed import` + +Imports schema and relationships from a file or url + +``` +zed import [flags] +``` + +### Examples + +``` + + From a gist: + zed import https://gist.github.com/ecordell/8e3b613a677e3c844742cf24421c08b6 + + From a playground link: + zed import https://play.authzed.com/s/iksdFvCtvnkR/schema + + From pastebin: + zed import https://pastebin.com/8qU45rVK + + From a devtools instance: + zed import https://localhost:8443/download + + From a local file (with prefix): + zed import file:///Users/zed/Downloads/authzed-x7izWU8_2Gw3.yaml + + From a local file (no prefix): + zed import authzed-x7izWU8_2Gw3.yaml + + Only schema: + zed import --relationships=false file:///Users/zed/Downloads/authzed-x7izWU8_2Gw3.yaml + + Only relationships: + zed import --schema=false file:///Users/zed/Downloads/authzed-x7izWU8_2Gw3.yaml + + With schema definition prefix: + zed import --schema-definition-prefix=mypermsystem file:///Users/zed/Downloads/authzed-x7izWU8_2Gw3.yaml + +``` + +### Options + +``` + --batch-size int import batch size (default 1000) + --relationships import relationships (default true) + --schema import schema (default true) + --schema-definition-prefix string prefix to add to the schema's definition(s) before importing + --workers int number of concurrent batching workers (default 1) +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed mcp` + +MCP (Model Context Protocol) server commands. +The MCP server provides tooling and resources for developing and debugging SpiceDB schema and relationships. The server runs an in-memory development instance of SpiceDB and does not connect to a running instance of SpiceDB. + +To use with Claude Code, run `zed mcp experimental-run` to start the SpiceDB Dev MCP server and then run `claude mcp add --transport http spicedb "http://localhost:9999/mcp"` to add the server to your Claude Code integrations. + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +### Children commands + +- [zed mcp experimental-run](#reference-zed-mcp-experimental-run) - Run the Experimental MCP server + +## Reference: `zed mcp experimental-run` + +Run the Experimental MCP server + +``` +zed mcp experimental-run [flags] +``` + +### Options + +``` + -p, --port int port for the HTTP streaming server (default 9999) +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed permission` + +Query the permissions in a permissions system + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +### Children commands + +- [zed permission bulk](#reference-zed-permission-bulk) - Check permissions in bulk exist for resource-subject pairs +- [zed permission check](#reference-zed-permission-check) - Check that a permission exists for a subject +- [zed permission expand](#reference-zed-permission-expand) - Expand the structure of a permission +- [zed permission lookup-resources](#reference-zed-permission-lookup-resources) - Enumerates the resources of a given type for which the subject has permission +- [zed permission lookup-subjects](#reference-zed-permission-lookup-subjects) - Enumerates the subjects of a given type for which the subject has permission on the resource + +## Reference: `zed permission bulk` + +Check permissions in bulk exist for resource-subject pairs + +``` +zed permission bulk ... [flags] +``` + +### Options + +``` + --consistency-at-exactly string evaluate at the provided zedtoken + --consistency-at-least string evaluate at least as consistent as the provided zedtoken + --consistency-full evaluate at the newest zedtoken in the database + --consistency-min-latency evaluate at the zedtoken preferred by the database + --explain requests debug information from SpiceDB and prints out a trace of the requests + --json output as JSON + --revision string optional revision at which to check + --schema requests debug information from SpiceDB and prints out the schema used +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed permission check` + +Check that a permission exists for a subject + +``` +zed permission check [flags] +``` + +### Options + +``` + --caveat-context string the caveat context to send along with the check, in JSON form + --consistency-at-exactly string evaluate at the provided zedtoken + --consistency-at-least string evaluate at least as consistent as the provided zedtoken + --consistency-full evaluate at the newest zedtoken in the database + --consistency-min-latency evaluate at the zedtoken preferred by the database + --error-on-no-permission if true, zed will return exit code 1 if subject does not have unconditional permission + --explain requests debug information from SpiceDB and prints out a trace of the requests + --json output as JSON + --schema requests debug information from SpiceDB and prints out the schema used +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed permission expand` + +Expand the structure of a permission + +``` +zed permission expand [flags] +``` + +### Options + +``` + --consistency-at-exactly string evaluate at the provided zedtoken + --consistency-at-least string evaluate at least as consistent as the provided zedtoken + --consistency-full evaluate at the newest zedtoken in the database + --consistency-min-latency evaluate at the zedtoken preferred by the database + --json output as JSON + --revision string optional revision at which to check +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed permission lookup-resources` + +Enumerates the resources of a given type for which the subject has permission + +``` +zed permission lookup-resources [flags] +``` + +### Options + +``` + --caveat-context string the caveat context to send along with the lookup, in JSON form + --consistency-at-exactly string evaluate at the provided zedtoken + --consistency-at-least string evaluate at least as consistent as the provided zedtoken + --consistency-full evaluate at the newest zedtoken in the database + --consistency-min-latency evaluate at the zedtoken preferred by the database + --cursor string resume pagination from a specific cursor token + --json output as JSON + --page-limit uint32 limit of relations returned per page + --revision string optional revision at which to check + --show-cursor display the cursor token after pagination (default true) +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed permission lookup-subjects` + +Enumerates the subjects of a given type for which the subject has permission on the resource + +``` +zed permission lookup-subjects [flags] +``` + +### Options + +``` + --caveat-context string the caveat context to send along with the lookup, in JSON form + --consistency-at-exactly string evaluate at the provided zedtoken + --consistency-at-least string evaluate at least as consistent as the provided zedtoken + --consistency-full evaluate at the newest zedtoken in the database + --consistency-min-latency evaluate at the zedtoken preferred by the database + --json output as JSON + --revision string optional revision at which to check +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed relationship` + +Query and mutate the relationships in a permissions system + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +### Children commands + +- [zed relationship bulk-delete](#reference-zed-relationship-bulk-delete) - Deletes relationships matching the provided pattern en masse +- [zed relationship create](#reference-zed-relationship-create) - Create a relationship for a subject +- [zed relationship delete](#reference-zed-relationship-delete) - Deletes a relationship +- [zed relationship read](#reference-zed-relationship-read) - Enumerates relationships matching the provided pattern +- [zed relationship touch](#reference-zed-relationship-touch) - Idempotently updates a relationship for a subject +- [zed relationship watch](#reference-zed-relationship-watch) - Watches the stream of relationship updates and schema updates from the server + +## Reference: `zed relationship bulk-delete` + +Deletes relationships matching the provided pattern en masse + +``` +zed relationship bulk-delete [flags] +``` + +### Options + +``` + --force force deletion of all elements in batches defined by + --optional-limit uint32 the max amount of elements to delete. If you want to delete all in batches of size , set --force to true (default 1000) + --subject-filter string optional subject filter +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed relationship create` + +Create a relationship for a subject + +``` +zed relationship create [flags] +``` + +### Options + +``` + -b, --batch-size int batch size when writing streams of relationships from stdin (default 100) + --caveat string the caveat for the relationship, with format: 'caveat_name:{"some":"context"}' + --expiration-time string the expiration time of the relationship in RFC 3339 format + --json output as JSON +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed relationship delete` + +Deletes a relationship + +``` +zed relationship delete [flags] +``` + +### Options + +``` + -b, --batch-size int batch size when deleting streams of relationships from stdin (default 100) + --json output as JSON +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed relationship read` + +Enumerates relationships matching the provided pattern. + +To filter returned relationships using a resource ID prefix, append a '%' to the resource ID: + +zed relationship read some-type:some-prefix-% + +``` +zed relationship read [flags] +``` + +### Options + +``` + --consistency-at-exactly string evaluate at the provided zedtoken + --consistency-at-least string evaluate at least as consistent as the provided zedtoken + --consistency-full evaluate at the newest zedtoken in the database + --consistency-min-latency evaluate at the zedtoken preferred by the database + --json output as JSON + --page-limit uint32 limit of relations returned per page (default 100) + --subject-filter string optional subject filter +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed relationship touch` + +Idempotently updates a relationship for a subject + +``` +zed relationship touch [flags] +``` + +### Options + +``` + -b, --batch-size int batch size when writing streams of relationships from stdin (default 100) + --caveat string the caveat for the relationship, with format: 'caveat_name:{"some":"context"}' + --expiration-time string the expiration time for the relationship in RFC 3339 format + --json output as JSON +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed relationship watch` + +Watches the stream of relationship updates and schema updates from the server + +``` +zed relationship watch [object_types, ...] [start_cursor] [flags] +``` + +### Options + +``` + --filter optional_resource_type:optional_resource_id_or_prefix#optional_relation@optional_subject_filter optional filter(s) for the watch stream. Example: optional_resource_type:optional_resource_id_or_prefix#optional_relation@optional_subject_filter + --object_types strings optional object types to watch updates for + --revision string optional revision at which to start watching + --timestamp shows timestamp of incoming update events +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed schema` + +Manage schema for a permissions system + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +### Children commands + +- [zed schema compile](#reference-zed-schema-compile) - Compile a schema that uses extended syntax into one that can be written to SpiceDB +- [zed schema copy](#reference-zed-schema-copy) - Copy a schema from one context into another +- [zed schema diff](#reference-zed-schema-diff) - Diff two schema files +- [zed schema read](#reference-zed-schema-read) - Read the schema of a permissions system +- [zed schema write](#reference-zed-schema-write) - Write a schema file (.zed or stdin) to the current permissions system + +## Reference: `zed schema compile` + +Compile a schema that uses extended syntax into one that can be written to SpiceDB + +``` +zed schema compile [flags] +``` + +### Examples + +``` + + Write to stdout: + zed preview schema compile root.zed + Write to an output file: + zed preview schema compile root.zed --out compiled.zed + +``` + +### Options + +``` + --out string output filepath; omitting writes to stdout +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed schema copy` + +Copy a schema from one context into another + +``` +zed schema copy [flags] +``` + +### Options + +``` + --json output as JSON + --schema-definition-prefix string prefix to add to the schema's definition(s) before writing +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed schema diff` + +Diff two schema files + +``` +zed schema diff +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed schema read` + +Read the schema of a permissions system + +``` +zed schema read [flags] +``` + +### Options + +``` + --json output as JSON +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed schema write` + +Write a schema file (.zed or stdin) to the current permissions system + +``` +zed schema write [flags] +``` + +### Examples + +``` + + Write from a file: + zed schema write schema.zed + Write from stdin: + cat schema.zed | zed schema write + +``` + +### Options + +``` + --json output as JSON + --schema-definition-prefix string prefix to add to the schema's definition(s) before writing +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed use` + +Alias for `zed context use` + +``` +zed use +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed validate` + +Validates the given validation file (.yaml, .zaml) or schema file (.zed) + +``` +zed validate [flags] +``` + +### Examples + +``` + + From a local file (with prefix): + zed validate file:///Users/zed/Downloads/authzed-x7izWU8_2Gw3.yaml + + From a local file (no prefix): + zed validate authzed-x7izWU8_2Gw3.yaml + + From a gist: + zed validate https://gist.github.com/ecordell/8e3b613a677e3c844742cf24421c08b6 + + From a playground link: + zed validate https://play.authzed.com/s/iksdFvCtvnkR/schema + + From pastebin: + zed validate https://pastebin.com/8qU45rVK + + From a devtools instance: + zed validate https://localhost:8443/download +``` + +### Options + +``` + --fail-on-warn treat warnings as errors during validation + --force-color force color code output even in non-tty environments + --schema-type string force validation according to specific schema syntax ("", "composable", "standard") +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` + +## Reference: `zed version` + +Display zed and SpiceDB version information + +``` +zed version [flags] +``` + +### Options + +``` + --include-deps include dependencies' versions + --include-remote-version whether to display the version of Authzed or SpiceDB for the current context (default true) +``` + +### Options Inherited From Parent Flags + +``` + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB +``` diff --git a/content/spicedb/getting-started/page.mdx b/content/spicedb/getting-started/page.mdx new file mode 100644 index 0000000..e69de29 diff --git a/content/spicedb/getting-started/protecting-a-blog.mdx b/content/spicedb/getting-started/protecting-a-blog.mdx new file mode 100644 index 0000000..23569d7 --- /dev/null +++ b/content/spicedb/getting-started/protecting-a-blog.mdx @@ -0,0 +1,1115 @@ +import { Callout, Tabs } from "nextra/components"; +import YouTube from "react-youtube"; + +# Protecting a Blog Application + +This guide walks through the steps required to deeply integrate an application with Authzed or SpiceDB. +Not all software requires this level of integration, but it is preferable for greenfield applications or applications that are central in an architecture with multiple services. + +Instead of introducing an unfamiliar example app and altering various locations in its code, this guide is written such that each step is a standalone snippet of code that demonstrates an integration point and finding where those points exist in your codebase is an exercise left to the reader. + +Scroll to the bottom of this page for a video walkthrough of creating a Permissions System using AuthZed Cloud. + +## Prerequisites + +One of: + +- An [Authzed Cloud] Permission System +- A [running instance] of [SpiceDB][SpiceDB] with the configured preshared key for SpiceDB: + +```bash +# Using the binary +spicedb serve --grpc-preshared-key "t_your_token_here_1234567deadbeef" + +# Using Docker +docker run --rm -p 50051:50051 authzed/spicedb serve --grpc-preshared-key "t_your_token_here_1234567deadbeef" +``` + +[Authzed Cloud]: https://authzed.com/cloud/signup +[SpiceDB]: https://github.com/authzed/spicedb +[running instance]: /spicedb/getting-started/installing-spicedb + +## Create a Permissions System on AuthZed Cloud + +Sign in to [AuthZed Cloud](https://app.authzed.cloud) and click on the **+Create** button to create a Permissions System (PS) and fill in the necessary details: + +- The type of the PS can be either Production or Development +- Give it a name +- Choose a datastore. +- The update channel can be either be `rapid` or `regular` which determines the behavior of automatic updates when new SpiceDB releases are made available +- The Deployments tab has the following options: + - The name of the deployment + - A dropdown for the region in which the deployment is made. + Currently `us-east-1` and `eu-central-1` are available + - The number of vCPUs for your deployment. + The recommendation is to start with 2 vCPUs and then monitor the Metrics and change it based on your workload + - The number of replicas to deploy SpiceDB with primarily read workloads. + The recommendation is 3 but will depend on your latency requirements. + +Click the Save button to create a Permissions System + +## Configuring Access + +Before using the Permissions System, let's configure access to it. +This functionality enables organizations to apply the principle of least-privilege to services accessing SpiceDB. +For example, read-only tokens can be created for services that should never need to write to SpiceDB. +Read more about it [here](https://authzed.com/docs/authzed/concepts/restricted-api-access) + +Let’s start by creating a **Service Account** which is something that represents your unique workload. +We recommend creating a Service Account for each application that will access the SpiceDB API. +Add a name such as `blog-app` and a description before hitting Save. + +Now let’s create a **token**. +Tokens are long-lived credentials for Service Accounts. +SpiceDB clients must provide a Token in the Authorization header of an API request to perform actions granted to the Service Account. +Click on the `blog-app` service account you just created and then the Tokens item in the menu. +Create a token by providing a name and description. + +Let’s now provide a **Role** and attach a **Policy** to that Role. +A Role defines rules for accessing the SpiceDB API. +Roles are bound to Service Accounts. +Click the Roles -> Create Role and provide a name and a description. +Add the following permissions for this demo: + +``` +ReadSchema +WriteSchema +DeleteRelationships +ReadRelationships +WriteRelationships +CheckPermission +``` + +Finally, let’s create a Policy. +Policies are what bind Roles to a Service Account. +Click on Policies -> Create policy. +Provide a name and a description and pick the Service Account and Role created in the steps above to bind the two. + +You’re now ready to use AuthZed Cloud Permissions System! + +## Client Installation + +The first step to integrating any software is ensuring you have an API client. +Each client is installed with its ecosystem's package management tools: + +You can also interact with the Permissions System using [zed](https://github.com/authzed/zed) - the command-line client for managing SpiceDB clusters. + + + + +```sh +brew install authzed/tap/zed +zed context set +``` + + + + +```sh +# JavaScript and TypeScript +npm i @authzed/authzed-node +``` + + + + +```sh +mkdir first_app && cd first_app +go mod init first_app +go get github.com/authzed/authzed-go +go get github.com/authzed/grpcutil +go mod tidy +``` + + + + +```sh +pip install authzed +``` + + + + +```sh +gem install authzed +``` + + + + +```groovy +// build.gradle +dependencies { + implementation "com.authzed.api:authzed:0.6.0" + implementation 'io.grpc:grpc-protobuf:1.54.1' + implementation 'io.grpc:grpc-stub:1.54.1' +} +``` + + + + +You can find the endpoint on the AuthZed Cloud dashboard. +Click on the Permissions System that's just been created and locate the **Connect** button. +Copy the zed command and paste it in your terminal. +It should look like this: + +``` +zed context set us-east-1 acme-permission-system-xyz.aws.authzed.cloud:443 +``` + +where `us-east-1` is the name of the PS followed by the endpoint. +Replace the `token-here` with the token that was generated in the earlier step. + +## Defining and Applying a Schema + +Regardless of whether or not you have a preexisting schema written, integrating a new application will typically require you add new definitions to the [Schema]. + +[Schema]: /spicedb/concepts/schema + +As a quick recap, Schemas define the objects, their relations, and their checkable permissions that will be available to be used with the Permission System. + +We'll be using the following blog example throughout this guide: + +```zed +definition user {} +definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer +} +``` + +This example defines two types of objects that will be used in the permissions system: `user` and `post`. +Each post can have two kinds of relations to users: `reader` and `writer`. +Each post can have two permissions checked: `read` and `write`. +The `read` permission unions together both readers and writers, so that any writer is implicitly granted read, as well. +Feel free to modify and test your own experiments in the [playground]. + +[playground]: https://play.authzed.com/s/mVBBpf5poNd8/schema + +With a schema designed, we can now move on to using our client to apply that schema to the Permission System. + + +Similar to applying schema changes for relational databases, all changes to a schema must be backwards compatible. + +In production environments where relations change, you will likely want to write data migrations and apply those changes using a schema migration toolchain. + + + + + + +```sh +zed schema write <(cat << EOF +definition user {} +definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer +} +EOF +) +``` + + + + +```js +import { v1 } from "@authzed/authzed-node"; + +const { promises: client } = v1.NewClient("t_your_token_here_1234567deadbeef"); + +const schema = ` +definition user {} +definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer +}`; + +const schemaRequest = v1.WriteSchemaRequest.create({ + schema: schema, +}); + +const schemaResponse = await client.writeSchema(schemaRequest); +console.log(schemaResponse); +``` + + + + +```go +package main + +import ( + "context" + "log" + + pb "github.com/authzed/authzed-go/proto/authzed/api/v1" + "github.com/authzed/authzed-go/v1" + "github.com/authzed/grpcutil" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const schema = `definition user {} +definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer +}` + +const spicedbEndpoint = "grpc.authzed.com:443" + +func main() { + systemCerts, err := grpcutil.WithSystemCerts(grpcutil.VerifyCA) + if err != nil { + log.Fatalf("unable to load system CA certificates: %s", err) + } + + client, err := authzed.NewClient( + spicedbEndpoint, + grpcutil.WithBearerToken("t_your_token_here_1234567deadbeef"), + systemCerts, + ) + if err != nil { + log.Fatalf("unable to initialize client: %s", err) + } + + request := &pb.WriteSchemaRequest{Schema: schema} + _, err = client.WriteSchema(context.Background(), request) + if err != nil { + log.Fatalf("failed to write schema: %s", err) + } +} +``` + + + + +```py +from authzed.api.v1 import Client, WriteSchemaRequest +from grpcutil import bearer_token_credentials + +SCHEMA = """definition user {} +definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer +}""" + +ENDPOINT = "grpc.authzed.com:443" +CREDENTIALS = bearer_token_credentials("t_your_token_here_1234567deadbeef") + +client = Client(ENDPOINT, CREDENTIALS) +resp = client.WriteSchema(WriteSchemaRequest(schema=SCHEMA)) +``` + + + + +```rb +require 'authzed' + +schema = <<~SCHEMA +definition user {} +definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer +} +SCHEMA + +endpoint = 'grpc.authzed.com:443' +credentials = Authzed::GrpcUtil::BearerToken.new(token: 't_your_token_here_1234567deadbeef') + +client = Authzed::Api::V1::Client.new( + target: endpoint, + interceptors: [credentials], +) + +resp = client.schema_service.write_schema( + Authzed::Api::V1::WriteSchemaRequest.new(schema: schema) +) +``` + + + + +```java +import com.authzed.api.v1.SchemaServiceGrpc; +import com.authzed.api.v1.SchemaServiceOuterClass.*; +import com.authzed.grpcutil.BearerToken; +import io.grpc.*; +public class App { + public static void main(String[] args) { + ManagedChannel channel = ManagedChannelBuilder + .forTarget("grpc.authzed.com:443") + .useTransportSecurity() + .build(); + BearerToken bearerToken = new BearerToken("t_your_token_here_1234567deadbeef"); + SchemaServiceGrpc.SchemaServiceBlockingStub schemaService = SchemaServiceGrpc.newBlockingStub(channel) + .withCallCredentials(bearerToken); + String schema = """ + definition user {} + definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer + } + """; + WriteSchemaRequest request = WriteSchemaRequest + .newBuilder() + .setSchema(schema) + .build(); + WriteSchemaResponse response; + try { + response = schemaService.writeSchema(request); + } catch (Exception e) { + // Uh oh! + } + } +} +``` + + + + +## Storing Relationships + +After a permission system has its schema applied, it is ready to have its relationships created, touched, or deleted. +Relationships are live instances of relations between objects. +Because the relationships stored in the system can change at runtime, this is a powerful primitive for dynamically granting or revoking access to the resources you've modeled. +When applications modify or create rows in their database, they will also typically create or update relationships. + + + Writing relationships returns a [ZedToken] which is critical to ensuring + performance and [consistency]. + + +[ZedToken]: /spicedb/concepts/consistency#zedtokens +[consistency]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.Consistency + +In the following example, we'll be creating two relationships: one making Emilia a writer of the first post and another making Beatrice a reader of the first post. +You can also [touch and delete] relationships, but those are not as immediately useful for an empty permission system. + +[touch and delete]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.RelationshipUpdate + + + + +```shell zed +zed relationship create post:1 writer user:emilia +zed relationship create post:1 reader user:beatrice +``` + + + + +```js +import { v1 } from "@authzed/authzed-node"; + +const { promises: client } = v1.NewClient( + "t_your_token_here_1234567deadbeef", + "grpc.authzed.com:50051", +); + +const resource = v1.ObjectReference.create({ + objectType: "post", + objectId: "1", +}); + +const emilia = v1.ObjectReference.create({ + objectType: "user", + objectId: "emilia", +}); + +const beatrice = v1.ObjectReference.create({ + objectType: "user", + objectId: "beatrice", +}); + +const writeRequest = v1.WriteRelationshipsRequest.create({ + updates: [ + // Emilia is a Writer on Post 1 + v1.RelationshipUpdate.create({ + relationship: v1.Relationship.create({ + resource: resource, + relation: "writer", + subject: v1.SubjectReference.create({ object: emilia }), + }), + operation: v1.RelationshipUpdate_Operation.CREATE, + }), + // Beatrice is a Reader on Post 1 + v1.RelationshipUpdate.create({ + relationship: v1.Relationship.create({ + resource: resource, + relation: "reader", + subject: v1.SubjectReference.create({ object: beatrice }), + }), + operation: v1.RelationshipUpdate_Operation.CREATE, + }), + ], +}); + +const response = await client.writeRelationships(writeRequest); + +console.log(response); +``` + + + + +```go +package main + +import ( + "context" + "fmt" + "log" + + pb "github.com/authzed/authzed-go/proto/authzed/api/v1" + "github.com/authzed/authzed-go/v1" + "github.com/authzed/grpcutil" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const ( + spicedbEndpoint = "grpc.authzed.com:443" + token = "t_your_token_here_1234567deadbeef" +) + +func main() { + systemCerts, err := grpcutil.WithSystemCerts(grpcutil.VerifyCA) + if err != nil { + log.Fatalf("unable to load system CA certificates: %s", err) + } + + client, err := authzed.NewClient( + spicedbEndpoint, + grpcutil.WithBearerToken(token), + systemCerts, + // These options are if you're self-hosting and don't want TLS: + // grpcutil.WithInsecureBearerToken(token), + // grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + log.Fatalf("unable to initialize client: %s", err) + } + + request := &pb.WriteRelationshipsRequest{Updates: []*pb.RelationshipUpdate{ + { // Emilia is a Writer on Post 1 + Operation: pb.RelationshipUpdate_OPERATION_CREATE, + Relationship: &pb.Relationship{ + Resource: &pb.ObjectReference{ + ObjectType: "post", + ObjectId: "1", + }, + Relation: "writer", + Subject: &pb.SubjectReference{ + Object: &pb.ObjectReference{ + ObjectType: "user", + ObjectId: "emilia", + }, + }, + }, + }, + { // Beatrice is a Reader on Post 1 + Operation: pb.RelationshipUpdate_OPERATION_CREATE, + Relationship: &pb.Relationship{ + Resource: &pb.ObjectReference{ + ObjectType: "post", + ObjectId: "1", + }, + Relation: "reader", + Subject: &pb.SubjectReference{ + Object: &pb.ObjectReference{ + ObjectType: "user", + ObjectId: "beatrice", + }, + }, + }, + }, + }} + + resp, err := client.WriteRelationships(context.Background(), request) + if err != nil { + log.Fatalf("failed to write relations: %s", err) + } + fmt.Println(resp.WrittenAt.Token) +} +``` + + + + +```py +from authzed.api.v1 import ( + Client, + ObjectReference, + Relationship, + RelationshipUpdate, + SubjectReference, + WriteRelationshipsRequest, +) +from grpcutil import bearer_token_credentials + +client = Client( + "grpc.authzed.com:443", + bearer_token_credentials("t_your_token_here_1234567deadbeef"), +) + +resp = client.WriteRelationships( + WriteRelationshipsRequest( + updates=[ + # Emilia is a Writer on Post 1 + RelationshipUpdate( + operation=RelationshipUpdate.Operation.OPERATION_CREATE, + relationship=Relationship( + resource=ObjectReference(object_type="post", object_id="1"), + relation="writer", + subject=SubjectReference( + object=ObjectReference( + object_type="user", + object_id="emilia", + ) + ), + ), + ), + # Beatrice is a Reader on Post 1 + RelationshipUpdate( + operation=RelationshipUpdate.Operation.OPERATION_CREATE, + relationship=Relationship( + resource=ObjectReference(object_type="post", object_id="1"), + relation="reader", + subject=SubjectReference( + object=ObjectReference( + object_type="user", + object_id="beatrice", + ) + ), + ), + ), + ] + ) +) + +print(resp.written_at.token) +``` + + + + +```rb +require 'authzed' + +client = Authzed::Api::V1::Client.new( + target: 'grpc.authzed.com:443', + interceptors: [Authzed::GrpcUtil::BearerToken.new(token: 't_your_token_here_1234567deadbeef')], +) + +resp = client.permissions_service.write_relationships( + Authzed::Api::V1::WriteRelationshipsRequest.new( + updates: [ + # Emilia is a Writer on Post 1 + Authzed::Api::V1::RelationshipUpdate.new( + operation: Authzed::Api::V1::RelationshipUpdate::Operation::OPERATION_CREATE, + relationship: Authzed::Api::V1::Relationship.new( + resource: Authzed::Api::V1::ObjectReference.new(object_type: 'post', object_id: '1'), + relation: 'writer', + subject: Authzed::Api::V1::SubjectReference.new( + object: Authzed::Api::V1::ObjectReference.new(object_type: 'user', object_id: 'emilia'), + ), + ), + ), + # Beatrice is a Reader on Post 1 + Authzed::Api::V1::RelationshipUpdate.new( + operation: Authzed::Api::V1::RelationshipUpdate::Operation::OPERATION_CREATE, + relationship: Authzed::Api::V1::Relationship.new( + resource: Authzed::Api::V1::ObjectReference.new(object_type: 'post', object_id: '1'), + relation: 'reader', + subject: Authzed::Api::V1::SubjectReference.new( + object: Authzed::Api::V1::ObjectReference.new(object_type: 'user', object_id: 'beatrice'), + ), + ), + ), + ] + ) +) + +puts resp.written_at.token +``` + + + + +```java +import com.authzed.api.v1.PermissionService; +import com.authzed.api.v1.PermissionsServiceGrpc; +import com.authzed.grpcutil.BearerToken; +import com.authzed.api.v1.Core.*; +import io.grpc.*; + +public class App { + public static void main(String[] args) { + ManagedChannel channel = ManagedChannelBuilder + .forTarget("grpc.authzed.com:443") + .useTransportSecurity() + .build(); + + BearerToken bearerToken = new BearerToken("t_your_token_here_1234567deadbeef"); + PermissionsServiceGrpc.PermissionsServiceBlockingStub permissionsService = PermissionsServiceGrpc.newBlockingStub(channel) + .withCallCredentials(bearerToken); + + PermissionService.WriteRelationshipsRequest request = PermissionService.WriteRelationshipsRequest.newBuilder() + .addUpdates( + RelationshipUpdate.newBuilder() + .setOperation(RelationshipUpdate.Operation.OPERATION_CREATE) + .setRelationship( + Relationship.newBuilder() + .setResource( + ObjectReference.newBuilder() + .setObjectType("post") + .setObjectId("1") + .build()) + .setRelation("writer") + .setSubject( + SubjectReference.newBuilder() + .setObject( + ObjectReference.newBuilder() + .setObjectType("user") + .setObjectId("emilia") + .build()) + .build()) + .build()) + .build()) + .build(); + + PermissionService.WriteRelationshipsResponse response; + try { + response = permissionsService.writeRelationships(request); + String zedToken = response.getWrittenAt().getToken(); + } catch (Exception e) { + // Uh oh! + } + } +} +``` + + + + +## Checking Permissions + +Permissions Systems that have stored relationships are capable of performing permission checks. +Checks not only test for the existence of direct relationships, but also compute and traverse transitive relationships. +For example, in our example schema, writers have both write and read permissions, so there's no need to create a read relationship for a subject that is already a writer. + + +In addition to checking _permissions_, it is also possible to perform checks on _relations_ to determine membership. + +However, this goes against best practice: permissions can be redefined in backwards compatible ways by changing the schema, so it's ideal to rely on permissions as the contract between SpiceDB and applications querying SpiceDB. + + + + +When doing a permission check, in order to get read-after-write consistency, you must provide a [ZedToken] from the WriteRelationships response or request [full consistency]. + +[ZedToken]: /spicedb/concepts/consistency#zedtokens +[full consistency]: /spicedb/concepts/consistency#fully-consistent + + + +The following examples demonstrate the transitive property of checks: + + + + +```sh +zed permission check post:1 read user:emilia --revision "zedtokenfromwriterel" # true +zed permission check post:1 write user:emilia --revision "zedtokenfromwriterel" # true +zed permission check post:1 read user:beatrice --revision "zedtokenfromwriterel" # true +zed permission check post:1 write user:beatrice --revision "zedtokenfromwriterel" # false +``` + + + + +```js +import { v1 } from "@authzed/authzed-node"; + +const { promises: client } = v1.NewClient( + "t_your_token_here_1234567deadbeef", + "grpc.authzed.com:50051", + // NOTE: Remove if SpiceDB is behind TLS + v1.ClientSecurity.INSECURE_PLAINTEXT_CREDENTIALS, +); + +const resource = v1.ObjectReference.create({ + objectType: "post", + objectId: "1", +}); + +const emilia = v1.ObjectReference.create({ + objectType: "user", + objectId: "emilia", +}); + +const beatrice = v1.ObjectReference.create({ + objectType: "user", + objectId: "beatrice", +}); + +const emiliaCanRead = await client.checkPermission( + v1.CheckPermissionRequest.create({ + resource, + permission: "read", + subject: v1.SubjectReference.create({ + object: emilia, + }), + }), +); +console.log( + emiliaCanRead.permissionship === + v1.CheckPermissionResponse_Permissionship.HAS_PERMISSION, +); + +const emiliaCanWrite = await client.checkPermission( + v1.CheckPermissionRequest.create({ + resource, + permission: "write", + subject: v1.SubjectReference.create({ + object: emilia, + }), + }), +); +console.log( + emiliaCanWrite.permissionship === + v1.CheckPermissionResponse_Permissionship.HAS_PERMISSION, +); + +const beatriceCanRead = await client.checkPermission( + v1.CheckPermissionRequest.create({ + resource, + permission: "read", + subject: v1.SubjectReference.create({ + object: beatrice, + }), + }), +); +console.log( + beatriceCanRead.permissionship === + v1.CheckPermissionResponse_Permissionship.HAS_PERMISSION, +); + +const beatriceCanWrite = await client.checkPermission( + v1.CheckPermissionRequest.create({ + resource, + permission: "write", + subject: v1.SubjectReference.create({ + object: beatrice, + }), + }), +); +console.log( + beatriceCanWrite.permissionship === + v1.CheckPermissionResponse_Permissionship.HAS_PERMISSION, +); +``` + + + + +```go +package main + +import ( + "context" + "log" + + pb "github.com/authzed/authzed-go/proto/authzed/api/v1" + "github.com/authzed/authzed-go/v1" + "github.com/authzed/grpcutil" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const spicedbEndpoint = "grpc.authzed.com:443" + +func main() { + systemCerts, err := grpcutil.WithSystemCerts(grpcutil.VerifyCA) + if err != nil { + log.Fatalf("unable to load system CA certificates: %s", err) + } + + client, err := authzed.NewClient( + spicedbEndpoint, + grpcutil.WithBearerToken("t_your_token_here_1234567deadbeef"), + systemCerts, + // These options are if you're self-hosting and don't want TLS: + // grpcutil.WithInsecureBearerToken("t_your_token_here_1234567deadbeef"), + // grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + log.Fatalf("unable to initialize client: %s", err) + } + + ctx := context.Background() + + emilia := &pb.SubjectReference{Object: &pb.ObjectReference{ + ObjectType: "user", + ObjectId: "emilia", + }} + + beatrice := &pb.SubjectReference{Object: &pb.ObjectReference{ + ObjectType: "user", + ObjectId: "beatrice", + }} + + firstPost := &pb.ObjectReference{ + ObjectType: "post", + ObjectId: "1", + } + + resp, err := client.CheckPermission(ctx, &pb.CheckPermissionRequest{ + Resource: firstPost, + Permission: "read", + Subject: emilia, + }) + if err != nil { + log.Fatalf("failed to check permission: %s", err) + } + // resp.Permissionship == pb.CheckPermissionResponse_PERMISSIONSHIP_HAS_PERMISSION + + resp, err = client.CheckPermission(ctx, &pb.CheckPermissionRequest{ + Resource: firstPost, + Permission: "write", + Subject: emilia, + }) + if err != nil { + log.Fatalf("failed to check permission: %s", err) + } + // resp.Permissionship == pb.CheckPermissionResponse_PERMISSIONSHIP_HAS_PERMISSION + + resp, err = client.CheckPermission(ctx, &pb.CheckPermissionRequest{ + Resource: firstPost, + Permission: "read", + Subject: beatrice, + }) + if err != nil { + log.Fatalf("failed to check permission: %s", err) + } + // resp.Permissionship == pb.CheckPermissionResponse_PERMISSIONSHIP_HAS_PERMISSION + + resp, err = client.CheckPermission(ctx, &pb.CheckPermissionRequest{ + Resource: firstPost, + Permission: "write", + Subject: beatrice, + }) + if err != nil { + log.Fatalf("failed to check permission: %s", err) + } + // resp.Permissionship == pb.CheckPermissionResponse_PERMISSIONSHIP_NO_PERMISSION +} +``` + + + + +```py +from authzed.api.v1 import ( + CheckPermissionRequest, + CheckPermissionResponse, + Client, + ObjectReference, + SubjectReference, +) +from grpcutil import insecure_bearer_token_credentials + +client = Client( + "grpc.authzed.com:50051", + insecure_bearer_token_credentials("t_your_token_here_1234567deadbeef"), +) + +emilia = SubjectReference( + object=ObjectReference( + object_type="user", + object_id="emilia", + ) +) +beatrice = SubjectReference( + object=ObjectReference( + object_type="user", + object_id="beatrice", + ) +) + +post_one = ObjectReference(object_type="post", object_id="1") + +resp = client.CheckPermission( + CheckPermissionRequest( + resource=post_one, + permission="read", + subject=emilia, + ) +) +assert resp.permissionship == CheckPermissionResponse.PERMISSIONSHIP_HAS_PERMISSION + +resp = client.CheckPermission( + CheckPermissionRequest( + resource=post_one, + permission="write", + subject=emilia, + ) +) +assert resp.permissionship == CheckPermissionResponse.PERMISSIONSHIP_HAS_PERMISSION + +resp = client.CheckPermission( + CheckPermissionRequest( + resource=post_one, + permission="read", + subject=beatrice, + ) +) +assert resp.permissionship == CheckPermissionResponse.PERMISSIONSHIP_HAS_PERMISSION + +resp = client.CheckPermission( + CheckPermissionRequest( + resource=post_one, + permission="write", + subject=beatrice, + ) +) +assert resp.permissionship == CheckPermissionResponse.PERMISSIONSHIP_NO_PERMISSION +``` + + + + +```rb +require 'authzed' + +emilia = Authzed::Api::V1::SubjectReference.new(object: Authzed::Api::V1::ObjectReference.new( + object_type: 'user', + object_id: 'emilia', +)) +beatrice = Authzed::Api::V1::SubjectReference.new(object: Authzed::Api::V1::ObjectReference.new( + object_type: 'user', + object_id: 'beatrice', +)) +first_post = Authzed::Api::V1::ObjectReference.new(object_type: 'post', object_id: '1') + +client = Authzed::Api::V1::Client.new( + target: 'grpc.authzed.com:50051', + interceptors: [Authzed::GrpcUtil::BearerToken.new(token: 't_your_token_here_1234567deadbeef')], + credentials: :this_channel_is_insecure, +) + +resp = client.permissions_service.check_permission(Authzed::Api::V1::CheckPermissionRequest.new( + resource: first_post, + permission: 'read', + subject: emilia, +)) +raise unless Authzed::Api::V1::CheckPermissionResponse::Permissionship.resolve(resp.permissionship) == + Authzed::Api::V1::CheckPermissionResponse::Permissionship::PERMISSIONSHIP_HAS_PERMISSION + +resp = client.permissions_service.check_permission(Authzed::Api::V1::CheckPermissionRequest.new( + resource: first_post, + permission: 'write', + subject: emilia, +)) +raise unless Authzed::Api::V1::CheckPermissionResponse::Permissionship.resolve(resp.permissionship) == + Authzed::Api::V1::CheckPermissionResponse::Permissionship::PERMISSIONSHIP_HAS_PERMISSION + +resp = client.permissions_service.check_permission(Authzed::Api::V1::CheckPermissionRequest.new( + resource: first_post, + permission: 'read', + subject: beatrice, +)) +raise unless Authzed::Api::V1::CheckPermissionResponse::Permissionship.resolve(resp.permissionship) == + Authzed::Api::V1::CheckPermissionResponse::Permissionship::PERMISSIONSHIP_HAS_PERMISSION + +resp = client.permissions_service.check_permission(Authzed::Api::V1::CheckPermissionRequest.new( + resource: first_post, + permission: 'write', + subject: beatrice, +)) +raise unless Authzed::Api::V1::CheckPermissionResponse::Permissionship.resolve(resp.permissionship) == + Authzed::Api::V1::CheckPermissionResponse::Permissionship::PERMISSIONSHIP_NO_PERMISSION +``` + + + + + +```java +import com.authzed.api.v1.PermissionService; +import com.authzed.api.v1.PermissionsServiceGrpc; +import com.authzed.grpcutil.BearerToken; +import com.authzed.api.v1.Core.*; +import io.grpc.*; + +public class App { + public static void main(String[] args) { + ManagedChannel channel = ManagedChannelBuilder + .forTarget("grpc.authzed.com:50051") + .usePlaintext() + .build(); + + BearerToken bearerToken = new BearerToken("t_your_token_here_1234567deadbeef"); + + PermissionsServiceGrpc.PermissionsServiceBlockingStub permissionsService = PermissionsServiceGrpc.newBlockingStub(channel) + .withCallCredentials(bearerToken); + + ZedToken zedToken = ZedToken.newBuilder() + .setToken("zed_token_value") + .build(); + + PermissionService.CheckPermissionRequest request = PermissionService.CheckPermissionRequest.newBuilder() + .setConsistency( + PermissionService.Consistency.newBuilder() + .setAtLeastAsFresh(zedToken) + .build()) + .setResource( + ObjectReference.newBuilder() + .setObjectType("post") + .setObjectId("1") + .build()) + .setSubject( + SubjectReference.newBuilder() + .setObject( + ObjectReference.newBuilder() + .setObjectType("user") + .setObjectId("emilia") + .build()) + .build()) + .setPermission("read") + .build(); + + PermissionService.CheckPermissionResponse response; + try { + response = permissionsService.checkPermission(request); + response.getPermissionship(); + } catch (Exception e) { + // Uh oh! + } + } +} +``` + + + + +Here's a video walkthrough of creating a Permissions System on AuthZed Cloud: + + diff --git a/content/spicedb/links/_meta.ts b/content/spicedb/links/_meta.ts new file mode 100644 index 0000000..2364e37 --- /dev/null +++ b/content/spicedb/links/_meta.ts @@ -0,0 +1,24 @@ +import type { MetaRecord } from 'nextra' + +export default { + discord: { + title: "SpiceDB Discord", + href: "https://authzed.com/discord", + }, + discussions: { + title: "GitHub Discussions", + href: "https://github.com/orgs/authzed/discussions/new?category=q-a", + }, + "zanzibar-paper": { + title: "Annotated Zanzibar Paper", + href: "https://authzed.com/zanzibar", + }, + "awesome-list": { + title: "SpiceDB Awesome List", + href: "https://github.com/authzed/awesome-spicedb#user-content-awesome-spicedb", + }, + examples: { + title: "Community Examples", + href: "https://github.com/authzed/examples", + }, +} satisfies MetaRecord; diff --git a/content/spicedb/modeling/_meta.ts b/content/spicedb/modeling/_meta.ts new file mode 100644 index 0000000..e5b0f90 --- /dev/null +++ b/content/spicedb/modeling/_meta.ts @@ -0,0 +1,18 @@ +import type { MetaRecord } from 'nextra' + +export default { + "developing-a-schema": "Developing a Schema", + "composable-schemas": "Composable Schemas (Preview)", + "representing-users": "Representing Users", + "validation-testing-debugging": "Validation, Testing, Debugging", + "recursion-and-max-depth": "Recursion & Max Depth", + "protecting-a-list-endpoint": "Protecting a List Endpoint", + "migrating-schema": "Updating and Migrating Schema", + "access-control-management": "Access Control Management", + "access-control-audit": "Access Control Audit", + attributes: "Incorporating Attributes", + "dev-mcp-server": { + title: "SpiceDB Dev MCP Server", + href: "/mcp/authzed/spicedb-dev-mcp-server", + }, +} satisfies MetaRecord; diff --git a/content/spicedb/modeling/access-control-audit.mdx b/content/spicedb/modeling/access-control-audit.mdx new file mode 100644 index 0000000..ada635b --- /dev/null +++ b/content/spicedb/modeling/access-control-audit.mdx @@ -0,0 +1,107 @@ +import { Callout } from "nextra/components"; + +# Access Control Audit + +Aside from providing means to [manage access control], another common feature is tools to audit access control. +This is put in place to let application administrators or users themselves understand: + +- what permissions do users have +- how permissions have changed over time + +This is intimately related to [access-control management] but focuses on other types of queries over your application permissions. + +## Auditing User Permissions: What Permissions Does A User Have Over A Resource? + +If you want to determine the computed permission a user has over a resource you can leverage two different APIs in SpiceDB. + +- [ExperimentalReflectSchema] lets you query the permissions available for a specific resource. + You can even filter by permission name, or a permission name prefix. +- [BulkCheckPermission] lets you perform various permission checks in a single round-trip. + This is more efficient than issuing individual checks because SpiceDB will batch many of the subproblems involved. + +## Auditing Access Grants: How Have Access Grants Changed? + +As we described in [access-control management], we distinguish **access grants** from **permissions**: + +- user `joe` being assigned as `reader` on repository `kubernetes` is an access grant +- user `joe` having `read` permission over repository `kubernetes` is a permission + +Access grants describe how a subject can be granted permission to perform an operation over a resource, like for example _"a user views repository"_. +A permission is a computation of all the different ways in which a user can be granted (or denied) access, like a direct role grant, indirectly via a team, indirectly as the owner of a top-level organization, temporarily as an auditor... + +To understand how access grants in the system have changed over time, you can use SpiceDB [Watch] API, which lets you stream all relationship changes. +Please note that the [Watch] API is a near real-time streaming API, and thus the application developer is responsible for persisting the audit history according to their needs. +By default, SpiceDB holds up to 24 hours of change history, after which it is garbage collected automatically. + +Let's assume the traditional GitHub repository authorization model: + +```zed +definition user {} + +definition team { + relation member: user +} + +definition repository { + relation role_reader: user | team#member + relation role_writer: user | team#member + relation role_adminer: user | team#member + + permission read = role_reader + write + permission write = role_writer + admin + permission admin = role_adminer + +} +``` + +If you wanted to understand how the access grants to the `kubernetes` repository have changed over time, you could call the [Watch] API and filter by `resource_type=repository` and `optional_resource_id=kubernetes`. + +## Auditing Permissions: How Have Permissions Changed? + + + Auditing permission changes is a very complex problem to solve at scale: + that's why we built [Authzed Materialize]. + + +SpiceDB does not offer an API to stream permission changes, but you could use a combination of APIs to compute a limited audit trail of permission changes. +This strategy could also be used to build a materialized index of permissions to [Protect a List Endpoint] in your application's database. + + + Please note this strategy is **very computationally intensive**, and it would + very likely require scaling out a SpiceDB cluster. It's only likely to work + under very narrow use cases that can exploit domain awareness to reduce how + many computations need to be run. + + +1. Use the [Watch] API to stream all relationship changes in the system +1. Use the [ExperimentalComputablePermissions] API to determine all permissions affected by a relationship change + +These two APIs will be used as the foundation to start recomputing permissions, and the more siloed and controlled the use case, the higher the chances this can work at scale. + +- If you can silo the relationship changes to a specific resource, you could recompute all users that have permission over that resource by running [LookupSubjects] for each one of the affected permissions determined by [ExperimentalComputablePermissions]. + Please note this does not give you the delta of permission changes, that will be the responsibility of the application developer. +- However, if you can't exploit the application domain to silo how a permission changes, you'd be forced to recompute _all subjects over all available resources_, which is a **very** expensive operation to run to yield a potentially small delta of permission changes. + +## Auditing SpiceDB API Request and Responses + +SpiceDB does not offer an API to determine what API calls it has received. +Some alternative options include: + +- Building a custom middleware for SpiceDB and using the [Middleware Framework]. +- Wrapping SpiceDB into another service that acts as the single access point to SpiceDB and thus can audit all access +- Retrieving access logs using SpiceDB `--grpc-log-requests-enabled` and `--grpc-log-response-enabled` +- Using [Authzed Audit Logging] + +[manage access control]: ./access-control-management +[access-control management]: ./access-control-management +[ExperimentalComputablePermissions]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExperimentalService.ExperimentalComputablePermissions +[BulkCheckPermission]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExperimentalService.BulkCheckPermission +[Watch]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.WatchService.Watch +[Authzed Materialize]: https://authzed.com/products/authzed-materialize +[ExperimentalComputablePermissions]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExperimentalService.ExperimentalComputablePermissions +[BulkCheckPermission]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExperimentalService.BulkCheckPermission +[Protect a List Endpoint]: ./protecting-a-list-endpoint +[ExperimentalReflectSchema]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExperimentalService.ExperimentalReflectSchema +[LookupSubjects]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.LookupSubjects +[Authzed Audit Logging]: https://authzed.com/docs/authzed/concepts/audit-logging +[Middleware Framework]: https://github.com/authzed/spicedb/blob/b6f08c0fd2880540cbf564188b89141493ea3273/pkg/cmd/server/middleware.go diff --git a/content/spicedb/modeling/access-control-management.mdx b/content/spicedb/modeling/access-control-management.mdx new file mode 100644 index 0000000..7e9336e --- /dev/null +++ b/content/spicedb/modeling/access-control-management.mdx @@ -0,0 +1,108 @@ +import { Callout } from "nextra/components"; + +# Access-Control Management + +It's common to build applications with an access control management interface that lets privileged users visualize and modify permissions assigned to other users. +Depending on the permission model you've chosen for your application, several APIs in SpiceDB will help you to implement the feature. + +Let's describe some of the most common patterns. + +## Resource-Specific Access-Control Management + +When creating a UI to manage user access-control privileges over a resource, there are various potential features one may want to implement: + +- listing all users assigned with their role +- listing all the fine-grained permissions a user may have assigned + +### Coarse-Grained Access Control + +Let's say we have a traditional coarse-grained access control model where you assign roles to users over a resource, like GitHub's Role-Based Access Control model, and you want to list each one of the users and teams assigned alongside the role. +These role assignments are typically implemented as a relation between the target resource and the subject. + +```zed +definition user {} + +definition team { + relation member: user +} + +definition repository { + relation role_reader: user | team#member + relation role_writer: user | team#member + relation role_adminer: user | team#member + + permission read = role_reader + write + permission write = role_writer + admin + permission admin = role_adminer + +} +``` + +If you wanted to implement an access-control management UI for this model, you could use the [ReadRelationships] API. +When the set of roles is limited, you can do a call per role: + +```shell zed +zed relationship read repository:kubernetes role_reader +zed relationship read repository:kubernetes role_writer +zed relationship read repository:kubernetes role_adminer +``` + +If instead, you didn't want to hard-code each role in your application you could first determine the available relations, and then read the associated relationships: + +1. List all relations on the definition using [ExperimentalReflectSchema], which will let you query information about the schema, with the [Schema Filter] set to `optional_definition_name_filter=repository` and `optional_relation_name_filter=role_`. +1. Issue a `ReadRelationship` request for each role returned from the API call above + +### Fine-Grained Access Control + +Now let's imagine that instead, we have a fine-grained access control model. +Typically you see these implemented as "Custom Roles". +It's still a Role-Based Access Control Model, but one that lets you define the individual permissions associated with each Role. +This model is not different from the previous one in which users are still assigned roles over the resource, but these roles are fully customizable, instead of pre-canned. + +```zed + +definition user{} + +definition repository { + relation grants: role_grant + + permission create_issue = grants->create_issue +} + +definition role_grant { + relation role: role + relation grantee: user | team#user + + permission create_issue = role->create_issue & grantee + permission delete_issue = role->delete_issue & grantee +} + +definition role { + relation create_issue: user:* + relation delete_issue: user:* +} +``` + +You can follow a similar strategy if you want to show the roles: + +```shell zed +# returns the role grants over the kubernetes repository +zed relationship read repository:kubernetes grants +``` + +This will let you list each user and team with the assigned role name. +But what if you wanted instead to list the individual permissions, rather than the role? + +You could issue a `ReadRelationship` to determine the `role` of the grant, and a subsequent one with `role:my_role` as the resource and list all the relationships. +The resource relation of each list item represents all fine-grained permissions for the role. + +```shell zed +# returns the role of the grant +zed relationship read role_grant:my_role_grant role +# returns the fine-grained permissions of the role +zed relationship read role:my_role +``` + +[ReadRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.ReadRelationships +[ExperimentalReflectSchema]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExperimentalService.ExperimentalReflectSchema +[Schema Filter]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.ExpSchemaFilter diff --git a/content/spicedb/modeling/attributes.mdx b/content/spicedb/modeling/attributes.mdx new file mode 100644 index 0000000..067d7af --- /dev/null +++ b/content/spicedb/modeling/attributes.mdx @@ -0,0 +1,107 @@ +import { InlinePlayground } from "@/components/playground"; +import { Callout } from "nextra/components"; + +# Attributes + +If you are migrating to SpiceDB from a pre-existing authorization system, it's likely that attributes play a part in your authorization evaluations. + +SpiceDB is a Relationship Based Access control system. +This gives SpiceDB the flexibility to evaluate attributes for access control alongside more complicated access control logic like roles and/or relationships. + +The sections below will provide practical examples for implementing various kinds of attributes in the SpiceDB schema language. +Before reading this guide, it's recommended that you have some familiarity with the SpiceDB schema language. [These documents](/spicedb/modeling/developing-a-schema) are a good place to start. + +## Boolean Attributes + +A boolean attribute is an attribute on an object that affects authorization by enabling or disabling an authorization setting. +Boolean attributes can often be thought of as a toggle. +Feature flag authorization can be enabled with boolean attributes. + +### Wildcards + +[Wildcards](/spicedb/concepts/schema#wildcards) are a way to implement boolean attributes. +Wildcards modify a type so that a relationship can be written to all objects of a resource type but not individual objects. + +In the example below, the schema enforces the following authorization logic: a user will have `edit` permission on the document if they are related to the document as an `editor` and they relate to the document through `edit_enabled`. +Both are required because `editor` and `edit_enabled` are [intersected](/spicedb/concepts/schema#-intersection) at the `edit` permission definition. +To enable document editing, you need to establish a relationship that connects all users to the document using the `edit_enabled` relation: `document:somedocument#edit_enabled@user:*`. + + + + + Wildcards are adequate for most binary attribute scenarios; however, wildcards + are not currently supported by [Authzed + Materialize](/authzed/concepts/authzed-materialize). Those who plan to use + Materialize should use self relationships for binary attributes. + + +### Self Relationships + +Self relationships are another way to implement boolean attributes. +Self relationships relate an object to itself. +A self relationship is walked with an [arrow](/spicedb/concepts/schema#--arrow) back to an object's self. +In practice, relating something to itself toggles something on. + +In the example below, there is a schema that enforces the following authorization logic: a user can only view a document if the user is related to the document as viewer and editing is enabled for the document (this is the same authorization logic used in the wildcard example above). + +In the example below, to enable editing for a document, a self relationship using the `edit_enabled` relation must be written. +When a `document` is related to itself with the `edit_enabled` relation, that relation can be walked to itself (with the arrow) to determine who relates to the document as an `editor`. + +In summary, a `user` has permission to edit a `document` if they are related to that document as an `editor` and that document is related to itself with `edit_enabled`. + + + + + There is no mechanism in the SpiceDB schema language that enforces that a + relation be used as a self relation. In order to avoid accidentally misusing a + self relation (e.g. relating an object to a different instance of the same + type) it is recommended to implement client side logic that enforces only + using the self relation for it's intended purpose. + + +## Attribute Matching + +For this guide, attribute matching is defined as scenarios where a user or group of users needs to have an attribute (or set of attributes) required by a resource in order to perform a specific action on the resource. + +### Match at Least One Attribute of a Single Type + +Attribute matching can be achieved by relating a user to an attribute as a "member" and relating a resource to its required attribute objects. + +In the example below, users must match **at least one** of the document's country attributes in order to view the document. + +Country attributes are represented by the `country` object definition and every user that has a specific country attribute is related to the specific country. +When a `document` has a country attribute that can grant `edit` permission for a user, it is related to that country. + + + +### Match all Attributes of a Single Type + +It's possible to specify that **_all_** attributes must be satisfied by using an [intersection arrow](/spicedb/concepts/schema#all-intersection-arrow). + +In the example below, users must match **all** of the document's `country` attributes in order to view the document. + +This example is similar to the one above, except it requires that all attributes are satisfied instead of at least one attribute. + + + +### Match at Least One Attribute from Each Type of Attribute + +When you have several types of attributes, it's recommended that you have an object definition for each type of attribute and that you use [subject relations](/spicedb/concepts/schema#subject-relations) to connect resources to the required attribute. + + + +### Match All Attributes from Each Type of Attribute + +It's possible to specify that **_all_** attributes must be satisfied by using an [intersection arrow](/spicedb/concepts/schema#all-intersection-arrow). + +In the example below, users must match **all** of the document's `country` and `status` attributes in order to view the document. + +This example is similar to the one above, except it requires that all `country` and `status` attributes are satisfied instead of at least one attribute of each type. + + + +## Caveats + +In almost all cases, [caveats](/spicedb/concepts/caveats) should only be used when data required to evaluate a CheckPermission request is only available at the time of the request (e.g. user's current location or time of day). +Using caveats for static data (e.g. a document's status) can have negative performance impacts. +Static attribute data should always be modeled in the schema using patterns similar to those described above. diff --git a/content/spicedb/modeling/composable-schemas.mdx b/content/spicedb/modeling/composable-schemas.mdx new file mode 100644 index 0000000..68bf44f --- /dev/null +++ b/content/spicedb/modeling/composable-schemas.mdx @@ -0,0 +1,206 @@ +import { Callout, Tabs } from "nextra/components"; + +# Composable Schemas (Preview) + + + This preview feature's functionality may change before general release. + + +To make it easier to organize your schema and collaborate across teams, `zed` version v0.27.0 introduced a new schema compilation command that allows you to modularize your schema. + +``` +zed preview schema compile root.zed +``` + +The command allows you to combine a schema that is spread across many files, for example: + + + + +```zed +import "./subjects.zed" + +partial view_partial { + relation user: user + permission view = user +} + +definition resource { + ...view_partial + + relation organization: organization + permission manage = organization +} +``` + + + + +```zed +definition user {} +definition organization {} +``` + + + + +And it produces an output schema that can be understood by SpiceDB's `WriteSchema` API: + +```zed +definition user {} + +definition organization {} + +definition resource { + relation user: user + permission view = user + + relation organization: organization + permission manage = organization +} +``` + +There are three new pieces of syntax: [import statements](#import-statements), [partial declarations](#partial-declarations), and [partial references](#partial-references). + +## Breaking Changes + +The composable schema compiler has some breaking changes relative to the compiler used internally by SpiceDB on a `WriteSchema` call. +A new version of SpiceDB should not cause your schema to break. +However, the schema compiler introduces some new keywords (among other changes), which may result in a schema that can be +written to SpiceDB but not compiled. + +The obvious breaking changes are `import` and `partial` becoming keywords, so if you have a permission or +relation with those names, your schema can't be compiled. +We have also reserved some keywords for future use, such as `use`, `and`, `or`, and `not`. +If you get an unexpected `TokenTypeKeyword` error, this is probably why. +A full list of reserved keywords can be found in [`keyword` map definition](https://github.com/authzed/spicedb/blob/main/pkg/composableschemadsl/lexer/lex_def.go#L74) in the lexer. + +## Import Statements + +Import statements allow you to break down a schema along the lines of top-level declarations. + + + + +```zed +// An import keyword followed by a quoted relative filepath +import "./one.zed" + +// Note that a bare filename works as a relative path +import "two.zed" + +// The imports are included by the compilation process, which means that +// they can be referenced by other definitions +definition resource { + relation user: user + relation organization: organization + + permission view = user + organization +} +``` + + + + +```zed +definition user {} +``` + + + + +```zed +definition organization {} +``` + + + + + +### Good to Know + +- Import references must be within the folder where `zed` is invoked. +- Import cycles are treated as errors. +- All definitions in all imported files are pulled in. + Any duplicate definitions will cause an error. + +## Partials + +Partial declarations and references provide a means of decomposing a schema along lines that cross definition boundaries. + +### Partial Declarations + +A partial declaration is a top-level block that is declared using the `partial` keyword. +It can contain relations, permissions, and partial references just like a `definition` block, but its contents +must be referenced by a [partial reference](#partial-references) to show up in the compiled schema. + +```zed +partial view_partial { + ...some_other_partial + + relation user: user + permission view = user +} +``` + +#### Good to Know + +- Any partial that isn't referenced is ignored by the compilation process. +- Partial declarations can contain partial references, allowing for partials to be composed. + +### Partial References + +A partial reference takes a `partial` and includes the relations and permissions defined in that partial. +It works similarly to [JS spread syntax](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Spread_syntax) +or [python's dictionary unpacking](https://docs.python.org/3/reference/expressions.html#dictionary-displays). + +This syntax: + +```zed +partial view_partial { + relation viewer: user + permission view = viewer +} + +partial edit_partial { + relation editor: user + permission edit = editor +} + +definition resource { + ...view_partial + ...edit_partial +} +``` + +is equivalent to this declaration: + +```zed +definition resource { + relation user: user + permission view = user + + relation editor: user + permission edit = editor +} +``` + +#### Good to Know + +- Duplicate relations and permissions introduced by a partial reference are treated as errors. +- Circular references between partials are treated as errors. +- You can only reference partial declarations. + Attempting to reference other declaration types (e.g. a definition or a caveat) with a partial reference will result in a error. +- A partial can be referenced any number of times, and a partial or definition can contain any number of partial references. + +## An Example Workflow + +1. Make a change to your multi-file schema +1. Run `zed validate` to ensure that the changes are valid +1. Make a PR to your schema repository +1. CI runs `zed validate` again + +Then on merge: + +1. CI runs `zed preview schema compile` +1. CI calls SpiceDB's WriteSchema API with the compiled schema diff --git a/content/spicedb/modeling/developing-a-schema.mdx b/content/spicedb/modeling/developing-a-schema.mdx new file mode 100644 index 0000000..a5e2f64 --- /dev/null +++ b/content/spicedb/modeling/developing-a-schema.mdx @@ -0,0 +1,419 @@ +import { Callout } from "nextra/components"; +import YouTube from "react-youtube"; +import { InlinePlayground } from "@/components/playground"; + +# Developing a Schema + +This document walks through the overarching process of developing a new schema from scratch. + +A useful companion to this document is the [schema language reference][schema-docs]. + +The following video provides an overview of schema development using the Playground: + +
+ + +[schema-docs]: ../concepts/schema + +## Defining Object Types + +The first step in developing a new schema is to write one or more [Object Type definitions]. + +For this example, let's imagine a basic system consisting of a resource to be protected, such as a `document`, and `users` that can potentially access them. + +We begin by defining each of the object types via the `definition{:zed}` keyword: + +```zed +definition user {} + +definition document {} +``` + +So far, our schema and object definitions don't do much; they define the two types of objects for our system, but as they don't have any [relations] or [permissions] defined, the objects cannot be related to one another in any form, nor can we check any permissions on them. + +[Object Type definitions]: ../concepts/schema#object-type-definitions +[relations]: ../concepts/schema#relations +[permissions]: ../concepts/schema#permissions + +## Defining Relations + +Our next step is to decide how our objects can relate to one another, thus defining the kind of [relationships] we can store in SpiceDB. + +For this example, we've chosen a simple RBAC-style permissions model, where `users` can be granted a _role_, such as `reader`, on our resource, `document`. + +This choice of model means that the relations between our resource, `document`, and our `users` will be defined by the roles we want. +We can therefore start by defining a relation on our `document` type to represent one of these roles: `reader` in the following example. + +```zed +definition user {} + +definition document { + relation reader: user +} +``` + +Note the inclusion of `user` on the right hand side of the `reader` relation: this indicates that only objects of type `user` can relate via a `reader` relationship to a `document`. + +If we wanted more than a single allowed object type, the `|{:zed}` character can be used: + +```zed {5} +definition user {} +definition bot {} + +definition document { + relation reader: user | bot +} +``` + +[relationships]: ../concepts/relationships + +## Validating our schema + +To validate that our schema is correct, both [zed] and [Playground] support the writing of _test relationships_ as data writing tests against our schema. +Once we've [created test relationships][understanding-rels], we can define tests in three ways: + +- **Check Watches**: live checks performed as we edit the schema +- **Assertions**: positive or negative assertions to be verified when validation is run +- **Expected Relations**: exhaustive listing of all expected permissions and relations for a schema when validation is run + +[zed]: ../getting-started/installing-zed +[playground]: https://play.authzed.com +[understanding-rels]: ../concepts/relationships#understanding-relationships + +### Check Watches + +
+ + +### Assertions + +After you have a basic schema and some data to validate, you can write _assertions_ to ensure that the schema meets expectations. + +Assertions are written as two YAML lists containing zero or more relationships to verify: `assertTrue` and `assertFalse`. + +For this example, we wish to verify that the specific user given the `reader` role has said role, so we can write an assertion to validate it: + +```yaml +assertTrue: + - "document:specificdocument#reader@user:specificuser" +assertFalse: [] +``` + +Similarly, if we wanted to validate that _another_ user does not have that role, we can add that unexpected relationship to the `assertFalse` branch: + +```yaml +assertTrue: + - "document:specificdocument#reader@user:specificuser" +assertFalse: + - "document:specificdocument#reader@user:anotheruser" +``` + +Validations can be run by clicking the `Validate` button in the Playground or by using the `zed validate` command. + +### Expected Relations + +In addition to Check Watches and Assertions, there's also the concept of _Expected Relations_, which can be used to **exhaustively** check the membership of relations or permissions. + +The Expected Relations consists of a YAML-formatted map, with each key representing a relation, and the values being a list of strings holding the full set of expected relations. + +For example, we can write an empty first entry in our Expected Relations: + +```yaml +document:specificdocument#reader: [] +``` + +After hitting the `Update` button in the Playground, we are given the fully expanded form: + +```yaml +document:specificdocument#reader: + - "[user:specificuser] is " +``` + +While this example fails to demonstrate much more power than basic assertions, Expected Relations are far more powerful once we add additional relations and permissions to our schema. + +## Expanding our schema + +While being able to ask whether a user is a reader of a document is super useful, it is expected that the majority of permissions systems will consist of more than a single role. + +In this example we'd like to have a second role, that of `writer`, which will allow us to check if a user is a writer on the document. + +### Adding the writer relation + +To begin, we once again start by adding another relation, in this case `writer`: + +```zed +definition user {} + +definition document { + relation reader: user + relation writer: user +} +``` + +Next, we'd like to be able to test our new relation, so we add another test relationship for a different user: + +``` +document:specificdocument#reader@user:specificuser +document:specificdocument#writer@user:differentuser +``` + +To verify our test relationships worked, we can add another assertion, and also assert that the original user (`specificuser`) is _not_ a writer: + +```yaml +assertTrue: + - "document:specificdocument#reader@user:specificuser" + - "document:specificdocument#writer@user:differentuser" +assertFalse: + - "document:specificdocument#reader@user:anotheruser" + - "document:specificdocument#writer@user:specificuser" +``` + +Finally, we can add an expected relation for the new relation, to validate it: + +```yaml +document:specificdocument#reader: + - "[user:specificuser] is " +document:specificdocument#writer: + - "[user:differentuser] is " +``` + +## Defining permissions + +The above configuration and validation exposes one issue, however: users are assigned to a single relation `writer` or `reader`, but what if we wanted all users who could write a document to also be able to read a document? + +As a naive solution, we could create a `reader` relationship for every user whenever we create a `writer` relationship, but that would get difficult to maintain very quickly. + +Instead, we'd ideally like a user with role `writer` to be **implicitly** allowed to a read a document, such that we only ever need to write _one_ relationship representing the user's **actual** relation/role to the document. + +The solution to this problem is the second concept available within the Schema Language: [permissions]. A permission in a schema defines a permission _computed_ from one or more other relations or permissions. + +Let's take our schema again from above: + +```zed +definition user {} + +definition document { + relation reader: user + relation writer: user +} +``` + +Previously, we were checking if a specific user had a specific **role** (such as `reader`) on the document. +Now, however, we want to check if a specific user has a specific **permission** on the document, such as the ability to view the document. + +To support this use case, we can define a `permission`: + +```zed {6} +definition user {} + +definition document { + relation reader: user + relation writer: user + permission view = reader + writer +} +``` + +A `permission`, unlike a `relation`, cannot be explicitly written in the database: it is _computed_ at query time based on the expression found after the `=`. +Here, we compute the `view` permission to include any users found to have either the `reader` OR `writer` role, thus allowing users with either (or both) roles to view the document. + +[permissions]: ../concepts/schema#permissions + +### Updating our expected relations + +Now that we've updated our schema with our new permission, we can update our assertions and expected relations to ensure it functions as we expect. + +To start, we add an assertion that checks if the users can `view` the document: + +```yaml +assertTrue: + - "document:specificdocument#reader@user:specificuser" + - "document:specificdocument#writer@user:differentuser" + - "document:specificdocument#view@user:specificuser" + - "document:specificdocument#view@user:differentuser" +assertFalse: + - "document:specificdocument#reader@user:anotheruser" + - "document:specificdocument#writer@user:specificuser" +``` + +Next, we can update the expected relations to add the `view` permission, and ensure that both users have that permission on the document: + +```yaml +document:specificdocument#reader: + - "[user:specificuser] is " +document:specificdocument#view: + - "[user:differentuser] is " + - "[user:specificuser] is " +document:specificdocument#writer: + - "[user:differentuser] is " +``` + +Note that the contents of the angled brackets for `differentuser` and `specificuser` are **different**: they indicate the _relation_ by which the permission was transitively granted. + + + **Info:** + Expected Relations includes the relation by which a subject was found for a permission to ensure that not only is the permission is valid, but also that the _way_ a permission was validated matches that expected. + +If there are multiple ways that a subject can be found for a permission, Expected Relations will require _all_ of them to be listed to be valid. + + + +#### Working example + +
+ + +### Preparing to inherit permissions + +As we've seen above, we can use `permission` to define _implicit_ permissions, such as a `view` permission consisting of users either the `reader` or `writer` role. +Implicit permissions on a specific object type, however, are often insufficient: sometimes permissions need to be **inherited** between object types. + +As an example: imagine that we add the concept of an `organization` to our permissions system, where any user that is an administrator of an organization automatically gains the ability to `view` any `document` within that organization; how would we define such a permissions schema? + +### Defining the organization type + +To begin, we must first define the object type that represents our organization, including the `administrator` relation, to represent the administrator role for users: + +```zed {9,10,11,12} filename="Schema" +definition user {} + +definition document { + relation reader: user + relation writer: user + permission view = reader + writer +} + +/** organization represents an organization that contains documents */ +definition organization { + relation administrator: user +} +``` + +### Connecting organizations and documents + +In order for our inheritance to function, we must define a way to indicate that a document "lives" under an organization. +Fortunately, this is just another relation (between a `document` and its parent `organization`), so we can use another relation within the `document` type: + +```zed {9,10} filename="Schema" +definition user {} + +/** organization represents an organization that contains documents */ +definition organization { + relation administrator: user +} + +definition document { + /** docorg indicates that the organization owns this document */ + relation docorg: organization + + relation reader: user + relation writer: user + permission view = reader + writer +} +``` + +Here we've chosen to call this relation `docorg`, but it could be called anything: it is generally recommended to use either a contraction of the two namespaces being connected or, alternatively, a term representing the actual relationship between the object types (such as `parent`). + +### Adding the relationship + +Now that we've defined the `relation` to hold our new relationship, we can add a test relationship: + +```relationship filename="Test Relationships" +document:specificdocument#docorg@organization:someorg +``` + + + **Info:** Note the use of the organization as the **subject** in this + relationship + + +### Inheriting permissions + +Now that we have a means of stating that a document is owned by an organization, and a relation to define administrators role on the organization itself, our final steps are to add an `view_all_documents` permission to the organization and to edit the `view` permission to take this permission into account. + +To do so, we make use of the [arrow](../concepts/schema#--arrow) operator (`->`), which allows for referencing permissions _across_ another relation or permission: + +```zed filename="Schema" +/** user represents a registered user's account in our application */ +definition user {} + +/** organization represents an organization that contains documents */ +definition organization { + /** administrator indicates that the user is an admin of the org */ + relation administrator: user + + /** view_all_documents indicates whether a user can view all documents in the org */ + permission view_all_documents = administrator +} + +/** document represents a document with access control */ +definition document { + /** docorg indicates that the organization owns this document */ + relation docorg: organization + + /** reader indicates that the user is a reader on the document */ + relation reader: user + + /** writer indicates that the user is a writer on the document */ + relation writer: user + + /** view indicates whether the user can view the document */ + permission view = reader + writer + docorg->view_all_documents +} +``` + +The expression `docorg->view_all_documents` indicates to SpiceDB or Authzed to follow the `docorg` to any organizations found for the document, and then check for the user against the `view_all_documents` permission. + +By use of this expression, any user defined as an administrator of the organization that owns the document will also be able to view the document! + + + **Info:** It is _recommended_ that the right side of all arrows refer to + **permissions**, instead of relations. This allows for easy nested + computation, and is more readable. + + +### Adding an administrator user + +Now that we've declared that all users in `administrator` on the organization are also granted the `view` permission, let's define at least one user in our test data to be an administrator: + +```relationship filename="Test Relationships" +organization:someorg#administrator@user:someadminuser +``` + +### Testing inherited permissions + +Finally, we can add the user to the declarations in Assertions and Expected Relations and verify that the inheritance works: + +```yaml filename="Assertions" +assertTrue: + - "document:specificdocument#reader@user:specificuser" + - "document:specificdocument#writer@user:differentuser" + - "document:specificdocument#view@user:specificuser" + - "document:specificdocument#view@user:differentuser" + - "document:specificdocument#view@user:someadminuser" +assertFalse: + - "document:specificdocument#reader@user:anotheruser" + - "document:specificdocument#writer@user:specificuser" +``` + +```yaml filename="Expected Relations" +document:specificdocument#reader: + - "[user:specificuser] is " +document:specificdocument#view: + - "[user:differentuser] is " + - "[user:someadminuser] is " + - "[user:specificuser] is " +document:specificdocument#writer: + - "[user:differentuser] is " +``` + + + **Info:** + Note the expectation of `` for `someadminuser`, instead of `reader` or `writer` on the document: the permission is being granted by virtue of the user being an administrator of the organization. + + +## Example + +If you've been following along this entire document, you might find it useful to study this example including everything discussed: + +
+ diff --git a/content/spicedb/modeling/migrating-schema.mdx b/content/spicedb/modeling/migrating-schema.mdx new file mode 100644 index 0000000..1a2ffa0 --- /dev/null +++ b/content/spicedb/modeling/migrating-schema.mdx @@ -0,0 +1,210 @@ +import { Callout } from "nextra/components"; + +# Updating and Migrating Schema in SpiceDB + + + This page explains how to modify a SpiceDB schema. If you need information + about migrating the schema of a datastore underlying SpiceDB, like Postgres or + CockroachDB, go [here](/spicedb/concepts/datastore-migrations#migrations). If + you need information about migrating between SpiceDB instances, go + [here](/spicedb/ops/data/migrations). + + +[Schema] in SpiceDB represents the structural definitions of which relationships are allowed +in SpiceDB and how permissions are computed. + +[Schema]: developing-a-schema + +SpiceDB processes all calls to the [WriteSchema] in a **safe** manner: it is not possible to +break the type safety of a schema. + +As a result, certain operations are disallowed (as described below), but there is no risk in +accidentally breaking an internal computation. + +[WriteSchema]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.SchemaService.WriteSchema + +## Allowed Operations + +### Adding a new relation + +Adding a new `relation` to a `definition` is always allowed, as it cannot change any existing +types or computation: + +```zed {3} +definition resource { + relation existing: user + relation newrelation: user + permission view = existing +} +``` + +### Changing a permission + +Changing how a permission is computed is always allowed, so long as the expression +references other defined permissions or relations: + +```zed {4} /+ editor/ +definition resource { + relation viewer: user + relation editor: user + permission view = viewer + editor +} +``` + +### Adding a new subject type to a relation + +Adding a new allowed subject type to a relation is always allowed: + +```zed {2} /group#member/ +definition resource { + relation viewer: user | group#member + permission view = viewer +} +``` + +### Deleting a permission + +Removing a permission is always allowed, so long as it is not referenced by another +permission or relation. + + + While this cannot break the schema, it *can* break API callers if they are + making checks or other API requests against the permission. It is up to your + own CI system to verify that removed permissions are no longer referenced + externally. + + +## Contingent Operations + +For type safety reasons, any removal of a `relation` with data, or a `relation` or `permission` +referenced by _another_ `relation` or `permission` is disallowed. + +### Removing a relation + +A `relation` can _only_ be removed if _all_ of the relationships referencing it +have been deleted _and_ it is not referenced by any other `relation` or `permission` +in the schema. + +#### Process for removing a relation + +Given this example schema and we wish to remove relation `editor`: + +```zed {3} /relation editor: user/ +definition resource { + relation viewer: user + relation editor: user + permission view = viewer + editor +} +``` + +To remove `relation editor`: + +1. Change the schema to no longer reference the relation and call [WriteSchema] with the changes: + +```zed {4} +definition resource { + relation viewer: user + relation editor: user + permission view = viewer +} +``` + +1. Issue a [DeleteRelationships] call to delete _all_ relationships for the `editor` relation + +[DeleteRelationships]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.DeleteRelationships + +1. Update the schema to remove the relation entirely and call [WriteSchema] with the changes: + +```zed +definition resource { + relation viewer: user + permission view = viewer +} +``` + +### Removing an allowed subject type + +Similar to removing a relation itself, removing an allowed subject type can only be performed +once all relationships with that subject type on the relation have been deleted. + +#### Process for removing an allowed subject type + +Given this example schema and we wish to remove supporting `group#member` on `viewer`: + +```zed {2} /group#member/ +definition resource { + relation viewer: user | group#member + permission view = viewer +} +``` + +1. Issue a [DeleteRelationships] call to delete _all_ relationships for the `viewer` relation with subject type `group#member` + +1. Update the schema to remove the allowed subject type call [WriteSchema] with the changes: + +```zed +definition resource { + relation viewer: user + permission view = viewer +} +``` + +## Migrating data from one `relation` to another + +Given the constraints described above, migrating relationships from one `relation` to another +requires a few steps. + +Let's take a sample schema and walk through migrating data from `relation viewer` to a new `relation new_viewer`: + +```zed +definition resource { + relation viewer: user + permission view = viewer +} +``` + +1. Add the new relation: + +We start by adding the new relation and adding it to the `view` permission: + +```zed {3,4} /relation new_viewer: user2/ /+ new_viewer/ +definition resource { + relation viewer: user + relation new_viewer: user2 + permission view = viewer + new_viewer +} +``` + +1. Update the application: + +We next update our application so that it writes relationships to _both_ `relation viewer` and `relation new_viewer`. +This ensures that once we run the backfill (the next step), both relations are fully specified. + +1. Backfill the relationships: + +We next backfill the relationships by having our application write the relationships +for the `new_viewer` relation. +Make sure to copy _all_ relevant relationships in this step. + +1. Drop `viewer` from the permission: + +Once the relationships for `new_viewer` have been fully written and the permission has been verified, +(typically by issuing a CheckPermission request _directly_ to `new_viewer`), the `viewer` relation +can be dropped from the `view` permission: + +```zed {4} +definition resource { + relation viewer: user + relation new_viewer: user2 + permission view = new_viewer +} +``` + +1. Update the application: + +We next update our application to no longer write relationships to the `viewer` relation, +as it is no longer used. + +1. Delete the relation `viewer`: + +Finally, follow the instructions above for deleting a `relation` to delete the now-unused relation. diff --git a/content/spicedb/modeling/protecting-a-list-endpoint.mdx b/content/spicedb/modeling/protecting-a-list-endpoint.mdx new file mode 100644 index 0000000..42d3139 --- /dev/null +++ b/content/spicedb/modeling/protecting-a-list-endpoint.mdx @@ -0,0 +1,125 @@ +import { Callout } from "nextra/components"; + +# Protecting a List Endpoint + +## The Problem Statement + +In a normal monolithic application, the authorization for a list endpoint is typically rolled into the database calls +that the application makes to fetch the data in question. +Whether a user can access a resource is phrased in terms of a join on the tables that represent the authorization logic. + +In a system protected by SpiceDB, the authorization and data concerns are separated. +This means that enforcing authorization on a list endpoint requires making a call to both the database +and SpiceDB and combining the queries into a response. + +Broadly speaking, there are three ways to do this: + +- filtering with [LookupResources] +- checking with [CheckBulkPermissions] +- using [Authzed Materialize] to create a denormalized local view of a user's permissions. + +Which one you choose will depend on whether the set of accessible resources is much larger than a page of results returned by +your API, in addition to the overall size of your data. + +[LookupResources]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.LookupResources +[CheckBulkPermissions]: https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.CheckBulkPermissions +[Authzed Materialize]: https://authzed.com/products/authzed-materialize + +## Filtering with LookupResources + +If the number of resources that a user has access to is sufficiently small, you can use `LookupResources` to get the full +list of resources for which a user has a particular permission, and then use that as a filtering clause in your database +query. +In python pseudocode: + +```python +accessible_resource_ids = call_lookup_resources(user_id=some_user, permission=permission, resource_type=resource_type) +resources_for_response = fetch_resources_from_db(accessible_ids=accessible_resource_ids) +``` + +where `fetch_resources_from_db` would produce SQL that includes a clause equivalent to: + +```sql +WHERE id = ANY(ARRAY[]) +``` + +This is the simplest approach from a bookkeeping perspective and is a good place to start. +It should be noted that `LookupResources` can get heavy quickly - with a sufficiently large relation dataset, +a sufficiently complex schema, or a sufficiently large set of accessible results, you'll need to take a different approach. + +## Checking with CheckBulkPermissions + +If the number of resources that a user has access to is sufficiently large and `LookupResources` can't satisfy the use case +anymore, another approach is to fetch a page of results and then call `CheckBulkPermissions` to determine which of the +resources are accessible to the user. +In python pseudocode: + +```python +PAGE_SIZE = 20 + +results = [] + +while (len(results) < PAGE_SIZE): + candidate_results = fetch_resources_from_db(params=params, db_cursor=cursor) + accessible_ids = call_check_bulk_permissions( + ids=[result.id for result in candidate_results], + permission=permission, + user_id=user_id + ) + update_cursor(cursor) + results += [result for result in candidate_results if result.id in accessible_ids] + +# Return a page of results with size PAGE_SIZE +return results[:PAGE_SIZE] +``` + +Note that because we don't know how many results are going to be accessible beforehand, we need to iterate until we have a full page of +accessible results. +The performance of this approach will depend in part on choosing the size of the page of candidate results, and that in +turn will depend on the shape of your particular data. + +Note too that this approach works better with an API backed by cursor-based pagination than limit-offset pagination, since the database +doesn't know the offset associated with the last accessible result. + +This approach is handy for search interfaces since the filters on a search can reduce the set of candidate results +to the point where checking them via bulk check is relatively easy. + + + It's recommended to run the various CheckBulkPermissions API calls at the same + revision to get a consistent view of the permissions. (e.g. take the ZedToken + from the first call, and use it in all subsequent calls) + + +## Using Materialize + + + Materialize is currently in Early Access. Additional documentation and product + information will be coming soon. In the meantime, if you're interested, + [schedule a call!][Schedule Call] + + +[Authzed Materialize] is Authzed's version of the [Leopard cache] referenced in the Zanzibar paper, which provides a denormalized view +of user permissions to a consuming service. +This allows a service (e.g. a search service) to store a local copy of which users have permission to which resources, which +then means that ACL-aware filtering again becomes a simple JOIN against the local copy. + +In broad terms: + +- Authzed Materialize watches changes in your SpiceDB cluster and emits events as users gain and lose particular permissions to particular resources. +- Your service listens to that change stream and stores a local copy of the materialized view of user permissions +- Your API endpoints join with the tables where the local copy is stored to determine which resources are accessible + +This approach provides the greatest scalability of the three options, so if your data and/or traffic are prohibitive +under the above two approaches, we recommend giving Authzed Materialize a try. + +[Schedule Call]: https://authzed.com/call +[Leopard cache]: https://authzed.com/zanzibar/2IoYDUFMAE:0:T + +## Other Considerations + +### Decide on a semantic for an empty list + +There's a difference between "data exists but the user isn't allowed to see it" and "there is no data to be seen." +In a coarse-grained authorization system, it may make sense to return a 403 as a response from a list endpoint to +indicate that the user cannot access anything. +In a fine-grained authorization system backed by SpiceDB, it often makes sense to treat "there is no data" and "there is nothing you're authorized to see" as the same, by returning a successful response with an empty result set. diff --git a/content/spicedb/modeling/recursion-and-max-depth.mdx b/content/spicedb/modeling/recursion-and-max-depth.mdx new file mode 100644 index 0000000..58cd1d4 --- /dev/null +++ b/content/spicedb/modeling/recursion-and-max-depth.mdx @@ -0,0 +1,149 @@ +# Recursion and Max Depth + +Permissions questions in SpiceDB are answered by traversing the **tree** constructed from the graph formed +by combining the [schema] (structure) and [relationships] (data). + +A `CheckPermission` request will, for example, traverse starting from the resource+permission requested, +along any referenced permissions and relations, until the subject is found or maximum depth is +reached. + +[schema]: /spicedb/concepts/schema +[relationships]: /spicedb/concepts/relationships + +## Max Depth + +In order to prevent requests from traversing without bounds, SpiceDB comes with a defaults to a depth of +`50`, after which computation is halted and an error is returned to the caller. + +This max depth is configurable via the `--dispatch-max-depth` flag. + +## Recursion in Relationships + +As a result of expecting the permissions graph to be a **tree**, SpiceDB _does not_ support recursive data dependencies that result in operations +(such as `CheckPermission`) visiting the _same_ object more than once. + +### Example + +The following is an example of an **unsupported** nesting of groups: + +```zed +definition user {} + +definition group { + relation member: user | group#member +} + +definition resource { + relation viewer: user | group#member + permission view = viewer +} +``` + +and relationships: + +``` +resource:someresource#viewer@group:firstgroup#member +group:firstgroup#member@group:secondgroup#member +group:secondgroup#member@group:thirdgroup#member +group:thirdgroup#member@group:firstgroup#member +``` + +When computing a permission answer for `resource:someresource`, SpiceDB will attempt this walk: `resource:someresource#viewer` +-> `group:firstgroup#member` -> `group:secondgroup#member` -> `group:thirdgroup#member` -> +`group:firstgroup#member` -> ..., causing a cycle. + +## Common Questions + +### Why doesn't SpiceDB simply support tracking the objects it has walked? + +1. Nested recursive "sets" have unclear semantics. + +2. Undesirable overhead. + +#### Nested sets have semantics issues + +[Zanzibar] and ReBAC in general operate on _sets_: when a permission check is made, SpiceDB is +answering whether the requested subject is a member of the _set_ formed of all subjects that are +visited by walking the permissions tree. + +[Zanzibar]: https://zanzibar.tech + +The question becomes: if a group's members contains the members of _itself_, is that legal within +a set? +Much academic literature has been written about this topic (which we won't repeat here), +but the very question raises whether allowing such an approach is semantically valid. + +As a real example, imagine the following schema and relationships: + +```zed +definition user {} + +definition group { + relation direct_member: user | group#member + relation banned: user | group#member + permission member = direct_member - banned +} +``` + +``` +group:firstgroup#direct_member@group:secondgroup#member +group:firstgroup#banned@group:bannedgroup#member +group:secondgroup#direct_member@user:tom +group:bannedgroup#direct_member@group:firstgroup#member +``` + +As we see above,`user:tom` is a `direct_member` of `secondgroup`, which makes him a member +of `firstgroup` -> which implies he's a member of `bannedgroup` -> which implies he's _not_ +a member of `firstgroup` -> thus making him no longer `banned` -> (logical inconsistency) + +Thus, to prevent the above issue from occurring, Zanzibar and other ReBAC implementations such +as SpiceDB assume the permissions graph is a [tree]. + +[tree]: https://zanzibar.tech/2SMVg4W_Wx:N:k + +#### Overhead + +From a practical perspective, tracking of visited objects when computing `CheckPermission` and +other permissions queries results in having significant amount of overhead over the wire and in +memory to track the full set of encountered objects and check for duplicates. + +### What do I do about a max depth error on CheckPermission? + +If you've received an error like: + +``` +the check request has exceeded the allowable maximum depth of 50: this usually indicates a recursive or too deep data dependency. Try running zed with --explain to see the dependency +``` + +Run `zed --explain` with the parameters of the check to show whether the issue is due to recursion or because the tree is simply too deep: + +```sh +zed permission check resource:someresource view user:someuser --explain +``` + +```ansi +1:36PM INF debugging requested on check +! resource:someresource viewer (4.084125ms) +└── ! group:firstgroup member (3.445417ms) + └── ! group:secondgroup member (3.338708ms) + └── ! group:thirdgroup member (3.260125ms) + └── ! group:firstgroup member (cycle) (3.194125ms) +``` + +### Why did my check work with recursion? + +SpiceDB automatically short-circuits `CheckPermission` operations when the target subject has been +found. + +If the subject was found before the maximum depth was hit, then the operation will complete +successfully. +_However_, if the subject was not found, SpiceDB will continue walking, and ultimately return +the error you saw. + +### How do I check for a possible recursion when writing a relationship? + +Use the `CheckPermission` API to check if the subject contains the resource. + +For example, if writing the relationship `group:someparent#member@group:somechild#member` a check +can be made for `group:somechild#member@group:someparent#member`: if the _parent_ has permission +_on the child_, then the addition of this relationship will cause a recursion. diff --git a/content/spicedb/modeling/representing-users.mdx b/content/spicedb/modeling/representing-users.mdx new file mode 100644 index 0000000..a0c64be --- /dev/null +++ b/content/spicedb/modeling/representing-users.mdx @@ -0,0 +1,138 @@ +import { Callout } from "nextra/components"; + +# Representing Users + +Within a Permissions System, a [CheckPermission] call is always made between an object representing the _resource_ and an object representing a _subject_: The API call returns whether the _subject_ has the specified permission on the _resource_. + +[CheckPermission]: https://buf.build/authzed/api/docs/main:authzed.api.v1#CheckPermission + +## Representing users as subjects + +The most common kind of subjects found within a permissions system are some form of **user**. + +Users in SpiceDB/Authzed are [modeled as object types], same as resources. + +[modeled as object types]: https://authzed.com/blog/why-model-users/ + +Typically, it is users that are accessing your application or service and, therefore, it is for them that the various permissions must be checked. + +Choosing how to represent a user as a subject in Authzed/SpiceDB is very important, as the wrong choice can cause permissions checks to be incomplete or, in some cases, wrong. + +### Using a _stable_ external identifier + +The most common and recommended approach for representing a user as a subject is to use a **stable** identifier for the user as the subject's object ID. + +For example, if the authentication system used is OIDC and provides a `sub` field, then the object IDs for the users might be the `sub` field: + +``` +check resource:someresource view user:goog|487306745603273 +``` + +As the `sub` field is **guaranteed** to be stable for that particular user (if a compliant OIDC implementation), it is safe to use for checking permissions, as there is no risk that the `sub` will somehow represent a different user later. + + + +If you have _multiple_ authentication providers, then the recommendation is to define a subject type for _each_ provider, to ensure a clean namespace: + +```zed +/** githubuser represents a user from GitHub */ +definition githubuser {} + +/** gitlabuser represents a user from GitLab */ +definition gitlabuser {} +``` + + + +### Using a primary key + +The second most common approach is to have a representation of the subject in another backing data store, typically a relational database. + +If such a database exists, and there exists a single row per user, then using the row's primary ID (typically an integer or a UUID) represents another safe ID to use for user: + +``` +check resource:someresource view user:1337 +``` + + + +If using an auto-generated or auto-incrementing integer, make sure it cannot be reused. +Some databases allow various sequences to reuse IDs. + + + +### What about e-mail addresses? + + + +It is typically **not recommended** to use an e-mail address to represent a user as a subject in SpiceDB. + + + +This is for a number of reasons: + +- E-mail addresses are not universally stable and, often times, services allow them to be reused +- E-mail addresses are not universally verified and, often times, a caller of the [CheckPermission] may not be _certain_ that the user has that e-mail address +- Authzed/SpiceDB does not allow for `@` characters within object IDs + +If you know for **certain** that the e-mail address for a user is both stable and verified, and still wish to use it as the subject ID, then we recommend base64 encoding (with padding removed) the e-mail address to use it within Authzed/SpiceDB. + +## Representing anonymous visitors as subjects + +Some applications allow for _anonymous_ access to view (and occasionally, edit) various resources. + +Representing an anonymous visitor in Authzed/SpiceDB can be done via simply defining another subject type to represent the unauthenticated users: + +```zed +/** user represents a specific authenticated user */ +definition user {} + +/** anonymoususer represents an unauthenticated user */ +definition anonymoususer {} +``` + +To grant access to anonymous users to a resource, either a single **static** object ID can be used to represent _all_ anonymous users (such as `all`) or [wildcards] can be used: + +```zed document with anonymous user wildcard allowed +definition document { + relation reader: user | anonymoususer:* +} +``` + +[wildcards]: /spicedb/concepts/schema#wildcards + + + +It is recommended to use _wildcard_ with an anonymous user definition if there is ever a need to differentiate between anonymous users based on their object IDs. + +As an example, an anonymous user of a commenting system might be assigned a unique ID that is stored in their browser's cookies, enabling permission for editing a previously posted comment. + + + +## Representing services as subjects + +If your permissions checks are between machines or services and other services, it is recommended that the subject type be a representation of that service or its means of providing its identity. + +For example, you might represent a service directly: + +```zed +definition service {} + +definition resource { + relation viewer: service +} +``` + +Or via a token it was granted, by use of a reference to a _subject relation_: + +```zed +definition token {} + +definition service { + relation token: token +} + +definition resource { + relation viewer: service#token +} +``` diff --git a/content/spicedb/modeling/validation-testing-debugging.mdx b/content/spicedb/modeling/validation-testing-debugging.mdx new file mode 100644 index 0000000..75c78ab --- /dev/null +++ b/content/spicedb/modeling/validation-testing-debugging.mdx @@ -0,0 +1,288 @@ +import { Callout } from "nextra/components"; +import YouTube from "react-youtube"; + +# Validation, Testing, Debugging SpiceDB Schemas + +Whether you're designing the first iteration of your schema or you're running SpiceDB in production, you'll want tools to build confidence in performance, correctness, and design choices. +Tools for validation, testing, and debugging are often overlooked by those building bespoke systems because they can only dedicate enough engineering resources to solving their problem, rather than creating the proper foundation they'll need to continue to be successful in the future. + +SpiceDB has been designed with an eye towards being a foundation for authorization and subsequently provides various tools for working. + +## SpiceDB + +### Integration Test Server + +In order for applications to more easily perform integration tests against SpiceDB, there is a command in SpiceDB for running an integration test server. +The integration test server provides an isolated, empty datastore for each unique preshared key used to authenticate an API request. +The result of this design is that applications can run integration tests in parallel all against a single SpiceDB so long as they provide a unique credential per test. +By default, the server runs on port `50051` and also runs an additional read-only server on port `50052`. + +You can run the integration test server by executing `spicedb serve-testing` or by using our [GitHub Action][integration-action] that runs the same command. + +[integration-action]: #authzedaction-spicedb + +### CheckPermission Tracing Header + +While it is recommended that SpiceDB schema be validated and tested before production deployment, there are many scenarios where being able to see the actual paths taken against production data is incredibly important. + +To support this, SpiceDB's v1 CheckPermission API supports a debug header that will cause SpiceDB to trace the full set of relations and permission traversed while computing the check. + + + **Warning:** + Collecting these traces has a notable performance overhead. + +We do not recommend configuring your applications to enable this when debugging. +Instead, we recommend using [zed's explain flag] for this purpose. + +[zed's explain flag]: #explain-flag + + + +Configuring this header is done by setting the header `io.spicedb.requestdebuginfo` to the string `true`. + +The response will include a trailer, `io.spicedb.respmeta.debuginfo`, with a JSON-encoded tree. + +## Playground + +### Assertions + +In order to ensure that particular invariants are maintained in a schema, assertions about permissionship can be made. + +Assertions come in two flavors: positive and negative. +Assertions are written as a YAML list containing zero or more relationships. + +```yaml +assertTrue: + - "document:validation-testing-debugging#reader@user:you" +assertFalse: [] +``` + +#### Caveat Context In Assertions + + + In order to escape JSON representation of the caveat context in an assertion + you should use single-quotes. + + +You can provide caveat context as part of an assertion: + +```yaml +assertTrue: + - 'document:validation-testing-debugging#reader@user:you with {"somecondition": 42, "anothercondition": "hello world"}' +assertFalse: [] +``` + +You can also assert that a caveat context is required for a particular expression using `assertCaveated`: + +```yaml +assertTrue: [] +assertCaveated: + - "document:validation-testing-debugging#reader@user:you" +assertFalse: [] +``` + +### Check Watches + +Check Watches are type of assertion that updates in real-time with changes in the Playground. +This enables an even tighter feedback-loop when developing a schema. + +Below is an example of configuring a Check Watch: + +
+ + +Watches can show any of the following states: + +- ✅ Permission Allowed +- ❔ Permission Caveated +- ❌ Permission Denied +- ⚠️ Invalid Check + +![check-watches](/images/check-watches.png) + +### Expected Relations + +Expected Relations are a type of assertion that can be used to enumerate access to a specific relation. +This is useful when you want to exhaustively determine all the possible ways that one might acquire access. + +Expected Relations are written as YAML lists for each relation: + +```yaml +document:validation-testing-debugging#reader: + - "[user:you] is " +``` + +Because access can be transitive, Expected Relations include how they achieved access. +For example, if a schema is modeled hierarchically with a _platform_, _organization_, and _project_, Expected Relations for projects will include subjects from all points of the hierarchy that have access: + +```yaml +project:docs#admin: + - "[organization:authzed] is " + - "[user:rauchg] is " +``` + +### Caveats in Expected Relations + +When caveats are involved, and due to the unbounded nature of it, the Playground will focus on enumerating +expected relations with "maybe" semantics. +You can't specify an expected relation with a specific caveat context, because the Playground supports inferring those for you, +and that would lead potentially to an infinite number of possible caveat context values. + +What you'll see is an expected relation with the caveat context denoted as `[...]` right after the resource. +This reads as `user:rauchg may have admin permission over platform vercel`. + +```yaml +project:docs#admin: + - "[user:rauchg[...]] is " +``` + +### Exceptions in Expected Relations + +There are also scenarios where an expected relation is described with an exception, which indicates that a permission +holds for a specific resource and subject pair, but with a potential exception. + +The following example reads like: `user:rauchg has admin permission over platform vercel, unless user:rauchg is banned`. + +```yaml +project:docs#admin: + - "[user:rauchg[...]] is /" +``` + +## Check Tracing + +SpiceDB supports tracing of check requests to view the path(s) taken to compute the result, as well as timing information. + +Request tracing information by setting `with_tracing: true` in the request message and the information will be found in the response message. + + + **Warning:** In versions older than v1.31.0, request tracing information via a + header and the information will be found in the response footer as JSON. + + +## Zed + +### Zed Validate + +The `zed` binary provides a means of validating and testing a schema locally and in CI: + +```sh +zed validate my-schema.zed +``` + +It will load and validate the schema using the same parsing logic that the SpiceDB binary uses, +ensuring that a schema that passes validation will be considered a valid schema by your SpiceDB instance. + + + Note that a schema write can still fail if a relation is removed and there are + still instances of that relation in your database. `zed` doesn't know about + your data. + + +You can validate the functionality of your schema using validation yaml files, such as those exported +by the [Playground](#playground): + +```sh +zed validate schema-and-validations.yaml +``` + +Validation files take this form: + +```yaml +schema: |- + // schema goes here +# -- OR -- +schemaFile: "./path/to/schema.zed" + +# Note that relations are a single heredoc string rather than a yaml list +relationships: |- + object:foo#relation@subject:bar + object:baz#relation@subject:qux + +assertions: + assertTrue: + - object:foo#relation@subject:bar + assertFalse: + - object:foo#relation@subject:qux +validation: + object:foo#relation: + - "[subject:bar] is " +``` + +As of version v0.25.0, `zed validate` command can take multiple files as arguments: + +```sh +zed validate some-validations.yaml some-other-validations.yaml +``` + +This means you can validate a folder full of files using shell globbing: + +```sh +zed validate validations/* +``` + +There's an example of this available in the [examples repository](https://github.com/authzed/examples/tree/main/schemas/multiple-validation-files). + +If you're using GitHub, there's a [GitHub Action][validate-action] for running this validation. + +[validate-action]: #authzedaction-spicedb-validate + +### Explain Flag + +The `zed permission check` command has an optional flag, `--explain`, that will cause SpiceDB to collect the actual paths taken against the live system to compute a permission check. +If you're interested in learning more about this functionality in SpiceDB, you can read about the [tracing header] above. + +Here's an example using `--explain`: + +```ansi +$ zed permission check --explain document:firstdoc view user:fred +true +✓ document:firstdoc view (66.333µs) +├── ⨉ document:firstdoc writer (12.375µs) +└── ✓ document:firstdoc reader (20.667µs) + └── user:fred  +``` + +This command will also highlight which parts of the traversal were cached and if a [cycle] is detected. + +[cycle]: ./recursion-and-max-depth#recursion-in-relationships +[tracing header]: #checkpermission-tracing-header + +## SpiceDB GitHub Actions + +### [authzed/action-spicedb](https://github.com/marketplace/actions/spicedb-test-server) + +This GitHub Action runs the [SpiceDB Integration Test Server] for your workflows with the ability to configure different versions of SpiceDB. + +Here's an example snippet of a GitHub Workflow: + +```yaml +steps: + - uses: "authzed/action-spicedb@v1" + with: + version: "latest" +``` + +[SpiceDB Integration Test Server]: #integration-test-server + +### [authzed/action-spicedb-validate](https://github.com/marketplace/actions/validate-spicedb-schema) + + + **Info:** This tool is highly recommended because it can prevent deployments + of unverified changes. + + +The Playground offers a variety of tools that are useful for validating a design, but running the playground isn't designed for operating within a typical CI/CD environment. + +Zed provides a command for validation of files exported from the playground which is a perfect fit for being executed within a typical CI/CD environment. + +This GitHub Action runs the zed validation command on a provided file for your workflows. + +Here's an example snippet of a GitHub Workflow: + +```yaml +steps: + - uses: "actions/checkout@v4" + - uses: "authzed/action-spicedb-validate@v1" + with: + validationfile: "your-schema.yaml" +``` diff --git a/content/spicedb/ops/_meta.ts b/content/spicedb/ops/_meta.ts new file mode 100644 index 0000000..1cf5387 --- /dev/null +++ b/content/spicedb/ops/_meta.ts @@ -0,0 +1,13 @@ +import type { MetaRecord } from 'nextra' + +export default { + operator: "SpiceDB Kubernetes Operator", + "deploying-spicedb-operator": "Deploying the SpiceDB Kubernetes Operator", + eks: "Deploying to AWS EKS", + data: "Writing data to SpiceDB", + performance: "Improving Performance", + observability: "Observability Tooling", + "ai-agent-authorization": "Authorization for AI Agents", + "secure-rag-pipelines": + "Secure Your RAG Pipelines with Fine Grained Authorization", +} satisfies MetaRecord; diff --git a/content/spicedb/ops/ai-agent-authorization.mdx b/content/spicedb/ops/ai-agent-authorization.mdx new file mode 100644 index 0000000..755dbb3 --- /dev/null +++ b/content/spicedb/ops/ai-agent-authorization.mdx @@ -0,0 +1,60 @@ +import JupyterNotebookViewer from "@/components/JupyterNotebookViewer"; + +# Secure AI Agents with Fine Grained Authorization + +This guide shows how to build a secure Retrieval-Augmented Generation (RAG) pipeline where AI Agents can only access documents they are authorized for. +Authorization decisions are enforced by SpiceDB. +You can also get summary of only the documents the AI Agent is authorized to view. + +This guide uses OpenAI, Pinecone, LangChain, Jupyter Notebook and SpiceDB + +## Setup and Prerequisites + +- Access to a [SpiceDB](https://authzed.com/spicedb) instance. + You can find instructions for installing SpiceDB [here](https://authzed.com/docs/spicedb/getting-started/install/macos) +- A [Pinecone account](https://www.pinecone.io/) and API key +- An [OpenAI Platform account](https://platform.openai.com/docs/overview) and API key +- [Jupyter Notebook](https://jupyter.org/) running locally + +### Running SpiceDB + +Once you've installed SpiceDB, run a local instance with this command in your terminal: + +`spicedb serve --grpc-preshared-key "agents"` + +and you should see something like this that indicates an instance of SpiceDB is running locally: + +``` +user @ mac % spicedb serve --grpc-preshared-key "agents" +1:33PM INF configured logging async=false format=auto log_level=info provider=zerolog +1:33PM INF GOMEMLIMIT is updated GOMEMLIMIT=25769803776 package=github.com/KimMachineGun/automemlimit/memlimit previous=922 +3372036854775807 +1:33PM INF configured opentelemetry tracing endpoint= insecure=false provider=none sampleRatio=0.01 service=spicedb v=0 +1:33PM WRN this version of SpiceDB is out of date. See: https://github.com/authzed/spicedb/releases/tag/v1.44.4 latest-rele +ased-version=v1.44.4 this-version=v1.42.1 +1:33PM INF using memory datastore engine +1:33PM WRN in-memory datastore is not persistent and not feasible to run in a high availability fashion +1:33PM INF configured namespace cache defaultTTL=0 maxCost="32 MiB" numCounters=1000 +1:33PM INF schema watch explicitly disabled +1:33PM INF configured dispatch cache defaultTTL=20600 maxCost="13 MiB" numCounters=10000 +1:33PM INF configured dispatcher balancerconfig={"loadBalancingConfig":[{"consistent-hashring":{"replicationFactor":100,"sp +read":1}}]} concurrency-limit-check-permission=50 concurrency-limit-lookup-resources=50 concurrency-limit-lookup-subjects=5 +0 concurrency-limit-reachable-resources=50 +1:33PM INF grpc server started serving addr=:50051 insecure=true network=tcp service=grpc workers=0 +1:33PM INF configuration ClusterDispatchCacheConfig.CacheKindForTesting=(empty) ClusterDispatchCacheConfig.Enabled=true ClusterDispatchCacheConfig.MaxCost=70% ClusterDispatchCacheConfig.Metrics=true ClusterDispatchCacheConfig.Name=cluster_dispatch ClusterDispatchCacheConfig.NumCounters=100000 Datastore=nil DatastoreConfig.AllowedMigrations="(slice of size 0)" DatastoreConfig.BootstrapFileContents="(map of size 0)" DatastoreConfig.BootstrapFiles=[] DatastoreConfig.BootstrapOverwrite=false DatastoreConfig.BootstrapTimeout=10000 DatastoreConfig.ConnectRate=100 DatastoreConfig.CredentialsProviderName=(empty) DatastoreConfig.DisableStats=false DatastoreConfig.EnableConnectionBalancing=true DatastoreConfig.EnableDatastoreMetrics=true +1:33PM INF running server datastore=*schemacaching.definitionCachingProxy +1:33PM INF http server started serving addr=:9090 insecure=true service=metrics +1:33PM INF telemetry reporter scheduled endpoint=https://telemetry.authzed.com interval=1h0m0s next=38s +``` + +#### Download the Jupyter Notebook + +Clone the `workshops` [repository](https://github.com/authzed/workshops/) to your system and type `cd ai-agent-authorization` to enter the working directory. + +Start the `ai-agent-authz-v2.ipynb` Notebook locally by typing `jupyter ai-agent-authz-v2.ipynb` (or `python3 -m notebook`) in your terminal. + +## Add Fine Grained Authorization to AI Agents + +Here's the Jupyter Notebook with step-by-step instructions + + diff --git a/content/spicedb/ops/data/_meta.ts b/content/spicedb/ops/data/_meta.ts new file mode 100644 index 0000000..fd3f73d --- /dev/null +++ b/content/spicedb/ops/data/_meta.ts @@ -0,0 +1,7 @@ +import type { MetaRecord } from 'nextra' + +export default { + "bulk-operations": "Bulk Importing Relationships", + "writing-relationships": "Writing Relationships", + migrations: "Migrations", +} satisfies MetaRecord; diff --git a/content/spicedb/ops/data/bulk-operations.mdx b/content/spicedb/ops/data/bulk-operations.mdx new file mode 100644 index 0000000..99ea8ab --- /dev/null +++ b/content/spicedb/ops/data/bulk-operations.mdx @@ -0,0 +1,126 @@ +import { Tabs } from "nextra/components"; + +# Bulk Importing Relationships + +## Overview + +When setting up a SpiceDB cluster for the first time, there's often a data ingest process required to +set up the initial set of relations. +This can be done with [`WriteRelationships`](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.WriteRelationships) running in a loop, but you can only create 1,000 relationships (by default) at a time with this approach, and each transaction creates a new revision which incurs a bit of overhead. + +For faster ingest, we provide an [`ImportBulkRelationships`](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.PermissionsService.ImportBulkRelationships) call, which takes advantage of client-side gRPC streaming to accelerate the process and removes the cap on the number of relations that can be written at once. + +## Batching + +There are two batch sizes to consider: the number of relationships in a chunk written to the stream and the overall number of relationships in the lifetime of the request. +Breaking the request into chunks is a network optimization that makes it faster to push relationships from the client to the cluster. + +The overall number of relationships should reflect how many rows can easily be written in a single transaction by your datastore. +Note that you probably **don't** want to push all of your relationships through in a single request, as this could time out in your datastore. + +## Example + +We'll use the [authzed-dotnet](https://github.com/authzed/authzed-dotnet) client for this example. +Other client libraries will have different syntax and structures around their streaming and iteration, +but this should demonstrate the two different levels of chunking that we'll do in the process. + + + + ```csharp + var TOTAL_RELATIONSHIPS_TO_WRITE = 1000; + var RELATIONSHIPS_PER_TRANSACTION = 100; + var RELATIONSHIPS_PER_REQUEST_CHUNK = 10; + + // Start by breaking the full list into a sequence of chunks where each chunk fits easily + // into a datastore transaction. + var transactionChunks = allRelationshipsToWrite.Chunk(RELATIONSHIPS_PER_TRANSACTION); + + foreach (var relationshipsForRequest in transactionChunks) { + // For each of those transaction chunks, break it down further into chunks that + // optimize for network throughput. + var requestChunks = relationshipsForRequest.Chunk(RELATIONSHIPS_PER_REQUEST_CHUNK); + // Open up a client stream to the server for this transaction chunk + using var importCall = permissionsService.ImportBulkRelationships(); + foreach (var requestChunk in requestChunks) { + // For each network chunk, write to the client stream. + // NOTE: this makes the calls sequentially rather than concurrently; this could be + // optimized further by using tasks. + await importCall.RequestStream.WriteAsync(new ImportBulkRelationshipsRequest{ + Relationships = { requestChunk } + }); + } + // When we're done with the transaction chunk, complete the call and process the response. + await importCall.RequestStream.CompleteAsync(); + var importResponse = await importCall; + Console.WriteLine("request successful"); + Console.WriteLine(importResponse.NumLoaded); + // Repeat! + } + ``` + + + ```python + from itertools import batched + + TOTAL_RELATIONSHIPS_TO_WRITE = 1_000 + + RELATIONSHIPS_PER_TRANSACTION = 100 + RELATIONSHIPS_PER_REQUEST_CHUNK = 10 + + # NOTE: batched takes a larger iterator and makes an iterator of smaller chunks out of it. + # We iterate over chunks of size RELATIONSHIPS_PER_TRANSACTION, and then we break each request into + # chunks of size RELATIONSHIPS_PER_REQUEST_CHUNK. + transaction_chunks = batched( + all_relationships_to_write, RELATIONSHIPS_PER_TRANSACTION + ) + for relationships_for_request in transaction_chunks: + request_chunks = batched(relationships_for_request, RELATIONSHIPS_PER_REQUEST_CHUNK) + response = client.ImportBulkRelationships( + ( + ImportBulkRelationshipsRequest(relationships=relationships_chunk) + for relationships_chunk in request_chunks + ) + ) + print("request successful") + print(response.num_loaded) + ``` + + + + +The code for this example is [available here](https://github.com/authzed/authzed-dotnet/blob/main/examples/bulk-import/BulkImport/Program.cs). + +## Retrying and Resuming + +`ImportBulkRelationships`'s semantics only allow the creation of relationships. +If a relationship is imported that already exists in the database, it will error. +This can be frustrating when populating an instance if the process fails with a retryable error, such as those related to transient +network conditions. +The [authzed-go](https://github.com/authzed/authzed-go) client offers a [`RetryableClient`](https://github.com/authzed/authzed-go/blob/main/v1/retryable_client.go) +with retry logic built into its `ImportBulkRelationships` logic. + +This is used internally by [zed](https://github.com/authzed/zed) and is exposed by the `authzed-go` library, and works by +either skipping over the offending batch if the `Skip` strategy is used or falling back to `WriteRelationships` with a touch +semantic if the `Touch` strategy is used. +Similar logic can be implemented using the other client libraries. + +## Why does it work this way? + +SpiceDB's `ImportBulkRelationships` service uses [gRPC client streaming] as a network optimization. +It **does not** commit those relationships to your datastore as it receives them, but rather opens a database transaction +at the start of the call and then commits that transaction when the client ends the stream. + +This is because there isn't a good way to handle server-side errors in a commit-as-you-go approach. +We take this approach because if we were to commit each chunk sent over the network, the semantics +of server-side errors are ambiguous. +For example, you might receive an error that closes the stream, but that doesn't necessarily mean +that the last chunk you sent is where the error happened. +The error source could be sent as error context, but error handling and resumption would be difficult and cumbersome. + +A [gRPC bidirectional streaming](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc) approach could +help address this by ACKing each chunk individually, but that also requires a good amount of bookkeeping on the client to ensure +that every chunk that's written by the client has been acknowledged by the server. +Requiring multiple client-streaming requests means that you can use normal language error-handling flows +and know exactly what's been written to the server. + +[gRPC client streaming]: https://grpc.io/docs/what-is-grpc/core-concepts/#client-streaming-rpc diff --git a/content/spicedb/ops/data/migrations.mdx b/content/spicedb/ops/data/migrations.mdx new file mode 100644 index 0000000..df04f9a --- /dev/null +++ b/content/spicedb/ops/data/migrations.mdx @@ -0,0 +1,53 @@ +import { Callout } from "nextra/components"; + +# Migrations + +## Migrating from SpiceDB to SpiceDB + + + This section covers migrating data from one SpiceDB instance to another. If + you need information about migrating the schema of a datastore underlying + SpiceDB, like Postgres or CockroachDB, go + [here](/spicedb/concepts/datastore-migrations#migrations). If you need + information about making changes to a SpiceDB schema that result in a + migration, go [here](/spicedb/modeling/migrating-schema). + + + + Migrating data at the underlying database level is not recommended and + impossible in some cases. Using tools like `pg_dump`/`pg_restore` will break + SpiceDB MVCC. Additionally, if you are migrating to a SpiceDB with a different + datastore type (e.g. Postgres -> CockroachDB), you ***must*** use the SpiceDB + APIs (`exportBulk`/`importBulk` or `zed backup`) to backup and restore. + + +The options provided below enable you to consistently migrate between datastores with minimal downtime. + +All options utilize the [Zed CLI tool](https://github.com/authzed/zed#readme). + +### Write Downtime Migration + +A simple migration that will incur write downtime (not read downtime) for the duration between starting the export and finishing the import. + +1. Spin up your new SpiceDB. +2. Stop writes to your old SpiceDB. +3. Run `zed backup create ` against the old SpiceDB. +4. Run `zed backup restore ` against the new SpiceDB with the backup file generated by the previous command. +5. Switch reads to the new SpiceDB. +6. Start writing to the new SpiceDB. + +### Near Zero Downtime Migration + +This option involves more complexity than the above option. + +1. Spin up your new SpiceDB. +2. Run `zed backup create ` against your old SpiceDB. +3. Run `zed backup restore ` against the new SpiceDB with the backup file generated by the previous command. +4. Obtain the zed token that points to the backup revision with `zed backup parse-revision `. +5. Using the [watch API](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.WatchRequest), consume relationship changes from the old SpiceDB and write them to the new SpiceDB. + You should provide the zed token that you obtained in the previous request as the `optional_start_cursor` in the `WatchRequest`. + You will need to build a small worker to consume changes and write them to the new SpiceDB. + The worker needs to continuously consume changes until you are ready to turn off writes on the old SpiceDB. +6. Stop writes to the old SpiceDB. +7. Wait for the worker to process all changes. (this is when write downtime will occur) +8. Switch reads and writes to the new SpiceDB. diff --git a/content/spicedb/ops/data/writing-relationships.mdx b/content/spicedb/ops/data/writing-relationships.mdx new file mode 100644 index 0000000..51e5b77 --- /dev/null +++ b/content/spicedb/ops/data/writing-relationships.mdx @@ -0,0 +1,112 @@ +import { Tabs } from "nextra/components"; +import { Callout } from "nextra/components"; + +# Writing relationships + +This page will provide some practical recommendations for writing relationships to SpiceDB. +If you are interested in relationships as a concept, check out this [page](/spicedb/concepts/relationships). + +## Retries + +When making requests to SpiceDB, it's important to implement proper retry logic to handle transient failures. [SpiceDB APIs use gRPC\*](/spicedb/getting-started/client-libraries), which can experience various types of temporary failures that can be resolved through retries. + +Retries are recommended for all gRPC methods, not just WriteRelationships. + +\*SpiceDB can also expose an [HTTP API](/spicedb/getting-started/client-libraries#http-clients); however, gRPC is recommended. + +### Implementing Retry Policies + +You can implement your own retry policies using the gRPC Service Config. +Below, you will find a recommended Retry Policy. + +``` +"retryPolicy": { + "maxAttempts": 3, + "initialBackoff": "1s", + "maxBackoff": "4s", + "backoffMultiplier": 2, + "retryableStatusCodes": [ + 'UNAVAILABLE', 'RESOURCE_EXHAUSTED', 'DEADLINE_EXCEEDED', 'ABORTED', + ] +} +``` + +This retry policy configuration provides exponential backoff with the following behavior: + +**`maxAttempts: 3`** - Allows for a maximum of 3 total attempts (1 initial request + 2 retries). +This prevents infinite retry loops while giving sufficient opportunity for transient issues to resolve. + +**`initialBackoff: "1s"`** - Sets the initial delay to 1 second before the first retry attempt. +This gives the system time to recover from temporary issues. + +**`maxBackoff: "4s"`** - Caps the maximum delay between retries at 4 seconds to prevent excessively long waits that could impact user experience. + +**`backoffMultiplier: 2`** - Doubles the backoff time with each retry attempt. +Combined with the other settings, this creates a retry pattern of: 1s → 2s → 4s. + +**`retryableStatusCodes`** - Only retries on specific gRPC status codes that indicate transient failures: -`UNAVAILABLE`: SpiceDB is temporarily unavailable -`RESOURCE_EXHAUSTED`: SpiceDB is overloaded -`DEADLINE_EXCEEDED`: Request timed out -`ABORTED`: Operation was aborted, often due to conflicts that may resolve on retry + +You can find a python retry example [here](https://github.com/authzed/examples/blob/main/data/retry/main.py). + +## Writes: Touch vs Create + +A SpiceDB [relationship update](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.RelationshipUpdate) can use one of three operation types `CREATE`, `TOUCH`, OR `DELETE`. +This section will cover `CREATE` and `TOUCH`. +You can read more about `DELETE` in [the section below](#deleting-relationships). + +### Understanding the Operations + +**`CREATE`** - Inserts a new relationship. +If the relationship already exists, the operation will fail with an error. + +**`TOUCH`** - Upserts a relationship. +If the relationship already exists, it will do nothing. +If it doesn't exist, it will create it. + +### Key Differences + +| Operation | Behavior on Existing Relationship | Performance | Use Case | +| --------- | --------------------------------- | ------------------------ | ----------------------------- | +| `CREATE` | Fails with error | Faster (single insert) | Initial relationship creation | +| `TOUCH` | Updates/overwrites | Slower (delete + insert) | Idempotent operations | + +### Special Considerations + +**Expiring Relationships:** When working with [expiring relationships](/spicedb/concepts/expiring-relationships), always use `TOUCH`. +If a relationship has expired but hasn't been garbage collected yet, using `CREATE` will return an error. + +**Error Handling:** When using `CREATE`, be prepared to handle duplicate relationship errors appropriately in your application logic. + +## Deleting Relationships + +SpiceDB provides two methods for deleting relationships: using the `WriteRelationships` API with the `DELETE` operation or using the `DeleteRelationships` API. +Each approach has different behaviors and use cases. + +### WriteRelationships with the `DELETE` Operation + +The [`WriteRelationships`](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.WriteRelationshipsRequest) API supports a `DELETE` operation type that allows you to remove specific relationships as part of a batch of relationship updates. + +**`DELETE`** - Removes a relationship. +If the relationship does not exist, the operation will silently succeed (no-op). + +#### Characteristics + +- **Atomic Operations**: Can be combined with other relationship operations (`CREATE`, `TOUCH`) in a single atomic transaction +- **Granular Control**: Delete specific relationships alongside creating or updating others +- **Silent Failure**: Does not fail if the relationship doesn't exist +- **Batch Limit**: Subject to the same batch size limits as other `WriteRelationships` operations (1,000 updates by default) + +### DeleteRelationships API + +The [`DeleteRelationships`](https://buf.build/authzed/api/docs/main:authzed.api.v1#authzed.api.v1.DeleteRelationshipsRequest) API is a dedicated method for bulk deletion of relationships based on filters rather than specifying individual relationships. + +#### Characteristics + +- **Filter-Based**: Delete relationships based on resource type, relation, subject type, or combinations thereof +- **Bulk Operations**: Can delete many relationships matching the filter criteria in a single call +- **Separate Transaction**: Operates independently from `WriteRelationships` +- **Efficient for Mass Deletion**: Optimized for removing large numbers of relationships + +## Bulk Import + +Check out [Bulk Importing Relationships](./bulk-operations) diff --git a/content/spicedb/ops/deploying-spicedb-operator.mdx b/content/spicedb/ops/deploying-spicedb-operator.mdx new file mode 100644 index 0000000..a83039e --- /dev/null +++ b/content/spicedb/ops/deploying-spicedb-operator.mdx @@ -0,0 +1,135 @@ +import { Callout, Steps } from "nextra/components"; + +# Deploying the SpiceDB Operator + +The [SpiceDB Operator] is the best way to run SpiceDB in production. + +This guide will walk you through the steps to deploy the SpiceDB Operator to a [Kubernetes] cluster and confirm it's functional by creating a simple SpiceDB deployment. + +[SpiceDB Operator]: ../ops/operator +[Kubernetes]: https://kubernetes.io + +## Steps + + + +### Create or Configure a Kubernetes Cluster + +The rest of this guide assumes [kubectl] is configured to use an available Kubernetes cluster. + +For production use-cases, we recommend using your cloud provider's managed Kubernetes services (e.g. [EKS], [GKE], or [AKS]). + +If you want to run a Kubernetes cluster locally, we recommend one of: + +- [kind](https://kind.sigs.k8s.io) +- [OrbStack](https://orbstack.dev) +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) +- [minikube](https://minikube.sigs.k8s.io) + +[kubectl]: https://kubernetes.io/docs/tasks/tools/#kubectl +[EKS]: https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html +[GKE]: https://cloud.google.com/kubernetes-engine/ +[AKS]: https://learn.microsoft.com/en-us/azure/aks/ + +### Applying the Operator manifests + +Before modifying any cluster, we recommend double-checking that your current context is configured for the target cluster: + +```sh +kubectl config current-context +``` + +Now you're ready to apply the manifests that install the SpiceDB Operator: + +```sh +kubectl apply --server-side -k github.com/authzed/spicedb-operator/config +``` + +All resources are created in the `spicedb-operator` namespace. + +If you'd like to confirm that the deployment is running, you can run the following command: + +```sh +kubectl -n spicedb-operator get pods +``` + +### Create a SpiceDBCluster + +You can now create and configure SpiceDB clusters by applying `SpiceDBCluster` resources. + + + The following manifests configure a simple deployment, not a secure one. + +Do not use these values in production. + + + +Apply a SpiceDBCluster and required [Secret] using the following command: + +```sh +kubectl apply --server-side -f - < diff --git a/content/spicedb/ops/eks.mdx b/content/spicedb/ops/eks.mdx new file mode 100644 index 0000000..5a446e0 --- /dev/null +++ b/content/spicedb/ops/eks.mdx @@ -0,0 +1,336 @@ +import { Callout, Steps } from "nextra/components"; +import YouTube from "react-youtube"; + +# Installing SpiceDB on Amazon EKS + +Amazon Elastic Kubernetes Service or [Amazon EKS] is the managed Kubernetes service provided by Amazon and is the best way to run SpiceDB on AWS. + +This guide walks through creating a highly-available SpiceDB deployment on an existing EKS cluster. +TLS configuration is managed by [cert-manager] and [Amazon Route53]. + +[Amazon EKS]: https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html +[cert-manager]: https://cert-manager.io +[Amazon Route53]: https://aws.amazon.com/route53/ + +## Steps + + + +### Confirming the Prerequisites + +The rest of this guide assumes [kubectl] is configured to use an operational EKS cluster along with a Route53 External Hosted Zone. + +[kubectl]: https://kubernetes.io/docs/tasks/tools/#kubectl + +### Creating an IAM Policy + +In order for the cluster to dynamically configure DNS, the first step is grant access via the creation of an IAM Policy and attach it to the role used for the pods in EKS: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "route53:GetChange", + "Resource": "arn:aws:route53:::change/*" + }, + { + "Effect": "Allow", + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets" + ], + "Resource": "arn:aws:route53:::hostedzone/*" + }, + { + "Effect": "Allow", + "Action": "route53:ListHostedZonesByName", + "Resource": "*" + } + ] +} +``` + +### Deploying cert-manager + +Before modifying any cluster, we recommend double-checking that your current context is configured for the target cluster: + +```sh +kubectl config current-context +``` + +Now you're ready to apply the manifests that install cert-manager: + +```sh +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml +``` + +If you'd like to use another installation method for cert-manager, you can find more in the [cert-manager documentation][cert-manager-docs]. + +When cert-manager is operational, all the pods should be healthy provided the following command: + +```sh +kubectl -n cert-manager get pods +``` + +[cert-manager]: https://cert-manager.io +[cert-manager-docs]: https://cert-manager.io/docs/installation + +### Creating a Namespace + +Next up, we'll create a new [Namespace] for deploying SpiceDB. + +This guide uses one named `spicedb`, but feel free to use replace this throughout the guide with whatever you prefer. + +```sh +kubectl apply --server-side -f - < spicedb-config.yaml +apiVersion: authzed.com/v1alpha1 +kind: SpiceDBCluster +metadata: + name: dev #Change optional: you can change this name, but be mindful of your dispatch TLS certificate URL and service selector +spec: + config: + datastoreEngine: postgres + replicas: 2 #Change optional: at least two replicas are required for HA + tlsSecretName: spicedb-le-tls + dispatchUpstreamCASecretName: dispatch-root-secret + dispatchClusterTLSCertPath: "/etc/dispatch/tls.crt" + dispatchClusterTLSKeyPath: "/etc/dispatch/tls.key" + secretName: dev-spicedb-config + patches: + - kind: Deployment + patch: + spec: + template: + spec: + containers: + - name: spicedb + volumeMounts: + - name: custom-dispatch-tls + readOnly: true + mountPath: "/etc/dispatch" + volumes: + - name: custom-dispatch-tls + secret: + secretName: dispatch-root-secret +--- +apiVersion: v1 +kind: Secret +metadata: + name: dev-spicedb-config +stringData: + preshared_key: "averysecretpresharedkey" #Change: this is your API token definition and should be kept secure. + datastore_uri: "postgresql://user:password@postgres.com:5432" #Change: this is a Postgres connection string +EOF +``` + +- Apply the above configuration with: `kubectl apply -f spicedb-config.yaml -n spicedb` + +### Deploy Cloud Load Balancer Service + +This step will deploy a service of type load balancer, which will deploy an external AWS load balancer to route traffic to our SpiceDB pods. + +```yaml +cat << EOF > spicedb-lb.yaml +apiVersion: v1 +kind: Service +metadata: + name: spicedb-external-lb + namespace: spicedb +spec: + ports: + - name: grpc + port: 50051 + protocol: TCP + targetPort: 50051 + - name: gateway + port: 8443 + protocol: TCP + targetPort: 8443 + - name: metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/instance: dev-spicedb #Change optional: in this example, "dev" is the name of the SpiceDBCluster object. If you didn't use "dev", change "dev" to what you used. + sessionAffinity: None + type: LoadBalancer +EOF +``` + +- Apply the above configuration with: `kubectl apply -f spicedb-lb.yaml` +- Run the following command to get the External-IP of the load balancer: `kubectl get -n spicedb services spicedb-external-lb -o json | jq '.status.loadBalancer.ingress[0].hostname'` +- Take the output of the command and add it as a C-Name record in your Route 53 Hosted Zone. **Note**: Ensure that it's added to the record containing the `dnsNames` that was specified while creating an Issuer & Certificate in Step 5. + +### Test + +You can use the Zed CLI tool to make sure everything works as expected: + +`zed context set eks-guide demo.example.com:50051 averysecretpresharedkey` + +Write a schema + +```yaml +zed schema write <(cat << EOF +definition user {} + +definition doc { + relation owner: user + permission view = owner +} +EOF +) +``` + +Write a relationship: + +```yaml +zed relationship create doc:1 owner user:emilia +``` + +Check a permission: + +```yaml +zed permission check doc:1 view user:emilia +``` + + + +Here's a YouTube video that describes the above steps: + + diff --git a/content/spicedb/ops/load-testing.mdx b/content/spicedb/ops/load-testing.mdx new file mode 100644 index 0000000..27eed4f --- /dev/null +++ b/content/spicedb/ops/load-testing.mdx @@ -0,0 +1,376 @@ +import { Callout, Tabs } from "nextra/components"; +import YouTube from "react-youtube"; + +import { InlinePlayground } from "@/components/playground"; + +# Load Testing SpiceDB + +Performing load testing against SpiceDB is an important step in verifying that you can meet your performance requirements. +The performance characteristics of SpiceDB are nuanced and there are many important considerations to keep in mind when performing a realistic load test. +This document is intended to help you understand the performance basics of SpiceDB and offer suggestions for building a load test that can accurately reflect your workload. + +## Seeding Data for SpiceDB Load Tests + +### Relationship Data Distribution + +The cardinality of your relationships (how many or how few objects are related to another object) significantly impacts the computational cost of CheckPermission and Lookup requests. +Because of this, it's essential to seed relationships for your load test that relate objects together in a way that resembles real-world conditions. + +Every CheckPermission request to SpiceDB is broken into multiple sub-problems that are evaluated in parallel. +The more sub-problems SpiceDB has to compute, the more time it will take SpiceDB to issue a response to a CheckPermission request. +High relationship cardinality often means that SpiceDB will need to spend more time computing sub-problems. + +The below schema, relationships, and CheckPermission request are an example of how relationship cardinality can affect performance. + +
+ + +If the following CheckPermissionRequest occurred on the above schema and set of relationships, SpiceDB had to compute the answer to three sub-problems in order to determine if evan has `view` permission on `document:somedocument`. + +The sub-problems are: + +1. Is `evan` related to `group:1` as a `viewer`? +2. Is `evan` related to `group:2` as a `viewer`? +3. Is `evan` related to `group:3` as a `viewer`? + +```proto +CheckPermissionRequest { + resource: ObjectReference { + object_type: 'document' + object_id: 'somedocument' + } + permission: 'view' + subject: SubjectReference{ + object: ObjectReference { + object_type: 'user' + object_id: 'evan' + } + } +} +``` + +The concept of computing sub-problems for intermediary objects, like we did for groups, is referred to as "fanning out" or "fanout". +If more groups were related, to `document:somedocument`, it's likely that more subproblems will be calculated to determine the `view` permission; however, the query will stop calculating sub-problems (also known as "short circuit") if it positively satisfies the permission check before all paths are exhausted. + +Notable exceptions to short-circuiting (thus negatively impacting overall performance) are usages of [Intersections (&)] and [Exclusions (-)]. + +While fanout is mostly unavoidable, it can result in an exponential increase to the number of sub-problems that must be computed. +Therefore, when performing a load test it's critical to have a SpiceDB instance seeded with relationship data that closely mimics real world data. + +[Intersections (&)]: ../concepts/schema#-intersection +[Exclusions (-)]: ../concepts/schema#--exclusion + +### Identifying Relationship Distribution Patterns + +After reading the above section, you should have an understanding of why relationship data distribution matters. +This section offers tips for getting the relationship distribution correct when seeding relationship data for a load test. +Here are some helpful tips (these tips assume you already have a schema finalized): + +- Generate a list of object types and their relationships. + In the example above we have two: `document#group@group` and `group#viewer@user`. +- Identify how many objects of each type exist. + In our scenario, we’ll need to determine how many objects of type `document`, `group`, and `user` exist. +- If a resource object has multiple relations defined on it, decide what percentage of resource objects have a relationship defined with the first relation, what percentage of resource objects will have a relationship defined with the second relation, and so on. + Keep in mind that it’s possible for resource objects to have relationships written from multiple relations (e.g. a single resource object can have a relationship(s) written for both the first and second relation). +- Identify the distribution of the relationships for specific relations. + You’ll need to identify what percentage of the resource objects relate to what percentage of the subject objects through a specific relation. + If you can’t identify the distribution of your relationships for specific relations, we recommend using a Pareto distribution as an approximation (e.g. 80% of `document` objects are related to 20% of `group` objects through the `group` relation). +- Codify it! + Now that you’ve completed the thought exercise, you’ll need to codify this information in relationship generating code. + + + **Info:** If you're struggling or want help, [reach out to the Authzed + team][reach out] or [the community in Discord][discord]. + + +We highly recommend that you pre-seed your relationships data before a load test. + +Any writes during a load test should be used for understanding write performance. + +Writes during a load test should not be used for seeding data that will be used by CheckPermission and LookUp requests. +If you have any questions or need help thinking through your data distribution. + +[reach out]: https://authzed.com/contact-us +[discord]: https://authzed.com/discord + +## Load Testing SpiceDB Checks + +The specific resources, permissions, and subjects that are checked in a request can have a significant impact on SpiceDB performance. + +### SpiceDB Check Cache Utilization + +Every time a SpiceDB subproblem is computed, it is cached for the remainder of the current quantization interval window (5s by default). +Subsequent requests (checks or lookups) that use the same revision and require a subset of the cached subproblems can fetch the pre-computed subproblems from an in memory cache instead of fetching the subproblem data from the datastore and computing the subproblem. +Therefore, every time a cached SpiceDB sub-problem is used, computation is avoided, load on the datastore is avoided, and a roundtrip to the datastore is avoided. +You can read about cache configurations in the [configuration section][cs] below. + +[cs]: #spicedb-configuration-performance + +### SpiceDB Check Fanout Impact + +In the [Relationship Data Distribution section][dd], fanout was discussed. +Certain CheckPermission requests will require more fanout to compute a response; therefore, it’s important to spread your checks out across a realistic sample of subjects, resources, and permissions. + +[dd]: #relationship-data-distribution + +### SpiceDB Check Sample Size + +Will your entire user base be online at the same time? +Probably not. +Therefore, you should only issue CheckPermission request for a subset of your user base. +This subset should represent the largest number of users online at a given time. +Choosing a percentage of your users will help simulate a more accurate cache hit ratio. + +### SpiceDB Check Negative Check Performance + +Negative checks (requests returning `PERMISSIONSHIP_NO_PERMISSION`) are almost always more computationally expensive than positive checks because negative checks will walk every branch of the graph while searching in vain for a satisfactory answer. +For most SpiceDB deployments, a significant majority of checks are positive. + +### Identifying SpiceDB Check Distribution Patterns + +These steps will help you design a realistic CheckPermission Request distribution: + +1. As mentioned above, determine what percentage of users will be online at a given time and only check that subset of your user base. +2. It's important that you pick a subset of users and resources that are a representative distribution of real world checks (e.g. don’t just check a subset of users that have access to a lot of resources). + Check users that have access to a representative amount of resources. + Some users may have access to many resources, some users may have access to few resources. +3. Identify distributions for particular object types. + If you can't determine your distributions, you may want to use a Pareto distribution as an approximation (e.g. 20% of a particular resource type accounts for 80% of the checks or 20% of the users account for 80% of the checks). + +## Load Testing SpiceDB Lookups + +Almost always, Lookups are more computationally expensive than checks. +To put it simply, more subproblems need to be calculated to answer lookups compared to checks. +Lookups that traverse intersections and exclusions are more expensive than lookups that do not. +Lookups can both use the cache and populate the cache with subproblems from and for checks. + +### Identifying SpiceDB Lookup Subjects Distribution Patterns + +Identify your distribution of Lookups (e.g. 20% of resource objects will account for 80% of lookups or `permission x` will account for 90% of lookups and `permission y` will account for 10% of lookups). +You should be thoughtful about how you distribute your requests among your users as some users will have more computationally expensive requests than others. + +### Identifying SpiceDB Lookup Resources Distribution Patterns + +Identify a subset of your user base that will be online at a given time and only perform requests for those users in your test. +You should be thoughtful about how you distribute your requests among your users as some user's will have more computationally expensive requests than others. + +## Load Testing SpiceDB Writes + +Writes impact datastore performance far more than they impact SpiceDB node performance. + + + **Warning:** + CockroachDB Datastore users have [special considerations][crdb] to understand. + +[crdb]: ../concepts/datastores#overlap-strategy + + + + + **Info:** + Postgres Datastore users should note that `CREATE` is always more performant than `TOUCH`. + +`TOUCH` always performs a delete and then an insert, while `CREATE` performs an insert, throwing an error if the data already exists. + + + +### Identifying SpiceDB Write Distribution Patterns + +We recommend that you pre-seed your relationships data before a load test. +Writes during a load test should be used for testing write performance. +Writes during a load test should not be used for seeding data that will be used by CheckPermission and Lookup requests. + +Writes should be a part of every thorough load test. +When designing your load tests, we recommend you quantify your writes in one of two ways: + +1. As a percentage of overall requests (i.e .5% of requests are writes) +2. As a number of writes per second (i.e. 30 writes per second) + +In most circumstances, the resource, subject, and permission of the relationships you write will have no impact on performance. + +## SpiceDB Schema Performance + +Performance should not be a primary concern when you are modeling your schema. +We recommend that you first model your schema to satisfy your business requirements. +After your business requirements have been satisfied by your schema, you can examine the following points to see if there is any room for optimization. + +We highly recommend that you [get in touch][call] for a schema review session before performing a load test. + +[call]: https://authzed.com/call + +### SpiceDB Caveats Performance + +In general, we only recommend using caveats when you’re evaluating dynamic data (e.g. users can only access the document between 9-5 or users can only access the document when you are in the EU). +Almost all other scenarios can be represented in the graph. +Caveats add computational cost to checks because their evaluations can't be cached. + +### SpiceDB Nesting & Recursion Performance + +If SpiceDB has to walk more hops on the graph, it will have to compute more subproblems. +For example, recursively relating folder objects two layers deep will not incur much of a performance penalty by itself. +Recursively relating folder objects 30 layers deep is likely to incur a performance penalty. +The same can be said by nesting objects of different object definitions 30 layers deep. + +### SpiceDB Intersections and Exclusions Performance + +Intersections and exclusions can have a small negative impact on Check performance because they force SpiceDB to look for "permissionship" on both sides of the intersection or exclusion. + +Intersections and exclusions can have a significant negative performance impact on LookupResources because they require SpiceDB to compute a candidate set of subjects to the left of the intersection or exclusion and then to perform a check on all candidates in the set. + +## SpiceDB Configuration Performance + +There are certain SpiceDB configuration settings and request parameters that should be considered before a load test. + +### SpiceDB Quantization Performance + +The SpiceDB quantization interval setting is used to specify the window of time in which we should use a chosen revision of SpiceDB’s datastore data. +Effectively, the `datastore-revision-quantization-interval` determines how long cached results will live. +There is also the `datastore-revision-quantization-max-staleness-percent` setting. +The `datastore-revision-quantization-max-staleness-percent` specifies the percentage of the revision quantization interval where we may opt to select a stale revision for performance reasons. +Increasing both or either numbers is likely to increase cache utilization which will lead to better performance. + +Read [this article](https://authzed.com/blog/hotspot-caching-in-google-zanzibar-and-spicedb) for more information on the staleness and performance implications of quantization. + +### SpiceDB Consistency Performance + +Consistency has a significant effect on cache utilization and thus performance. +Cache utilization is specified on a per request basis. +Before conducting a load test, it’s important you understand the performance and staleness implications of the consistency message(s) you are using. +The majority of SpiceDB users are using `minimize_latency` for every request. +The Authzed team almost always recommends against `fully_consistent`, in lieu of `fully_consistent` we recommend using `at_least_as_fresh` so that you can utilize the cache when it’s safe to do so. + +You can read more about consistency [here](../concepts/consistency). + +## Load Generation using Thumper + +### Overview + +We have an in-house load generator called [Thumper][thumper]. +We recommend you build your load tests with Thumper. +Thumper allows you to distribute checks, lookups, and writes as you see fit. +Thumper has been engineered by the Authzed team to provide a realistic and even flow of requests. +If you use Thumper, it will be easier for the Authzed team to understand your load test and help you make adjustments. + +[thumper]: https://github.com/authzed/thumper + +### Writing Scripts + +The arguments to `thumper migrate` or `thumper run` are script files, which +contain one or more scripts. +The scripts look something like this: + +```yaml +--- +# An identifier for the script +name: "check" + +# The relative weights of the scripts within a run. If multiple scripts are defined +# for the same run, each time the runner executes, it will select a random script +# according to the relative weight. If there is only one script, set this to 1. +weight: 40 + +# The steps block defines a series of actions that will happen in order on each +# execution of this script. +steps: + # The ops available are a subset of the full set of available gRPC endpoints. + # Most of the main ones are available and there are examples in the scripts + # folder of the repo. + - op: "CheckPermission" + # This is Golang template syntax and various values from the runtime can be + # inserted. Documentation of the available values is available in the readme: + # https://github.com/authzed/thumper#go-template-properties + resource: "{{ .Prefix }}resource:firstdoc" + subject: "{{ .Prefix }}user:tom" + permission: "view" + +# Triple-dash means this is a separate yaml document within the same file +--- +name: "read" +weight: 30 +steps: + - op: "ReadRelationships" + resource: "{{ .Prefix }}resource:firstdoc" + numExpected: 2 +``` + +### Running the scripts + +You can use `thumper migrate` to run a script once for setup purposes. +This is often used to write a schema and an initial set of relationships. + +```sh +thumper migrate --endpoint spicedb:50051 --token t_some_token --insecure true ./scripts/schema.yaml +``` + +You can then start the runner with: + +```sh +thumper run --endpoint spicedb:50051 --token t_some_token --insecure true ./scripts/example.yaml +``` + +Modify the options above to suit your environment. + +### Changing the Load + +By default, the runner selects one script from the set and runs it once per second. +To increase the volume of requests by a single runner, use the `THUMPER_QPS` environment variable or the `--qps` flag: + +```sh +thumper run --token presharedkeyhere --qps 5 +``` + +The above will spawn 5 goroutines, which will each issue calls once per second. + +### Configuration + +Similar to SpiceDB, you can either use command line flags or environment variables to supply options. +All flags are discoverable with `thumper --help`. +Any flag can be converted to its environment variable by capitalizing and prepending with `THUMPER_`. +For example, `--qps` can be configured via the `THUMPER_QPS` environment variable. + +### Monitoring the Thumper Process + +To help understand the throughput and behavior of your load test, Thumper exposes a metrics endpoint +at `:9090/metrics` in Prometheus format. +This can be scraped by your metrics framework to provide insight into your tests. + +## Monitoring a SpiceDB Load Test + +SpiceDB metrics can help you: + +1. ensure you're generating the correct number of requests per second +2. give you information on request latency +3. help you fine tune your load test + +The easiest way to consume and view metrics is via the AuthZed Dedicated Management Console. +There the metrics are preconfigured for you. +If you’re self hosting SpiceDB (not recommended for a load test) you can export metrics via the SpiceDB Prometheus endpoint. + +## Scaling SpiceDB + +### Scaling the SpiceDB Datastore + +When operating a SpiceDB cluster you must keep in mind the underlying DB. +By far, the most common sign you need to scale your datastore up or out is high datastore CPU utilization. + +### Scaling the SpiceDB Compute + +Like the datastore, it’s important to scale if CPU utilization is high; however, since SpiceDB is such a performance sensitive workload, we've seen significant performance gains from scaling out CPU in nodes that we’re experiencing less than 30% CPU utilization. + +## How Authzed helps with Load Testing SpiceDB + +The Authzed team has experience running [massive SpiceDB load tests][scale-test] and we want to help you with your load test too. + +Here's a few examples of how we can help: + +- Review your schema to make sure it’s fully optimized +- Review or help you create scripts to seed your relationship data +- Review or help you create scripts to generate CheckPermission and Lookup traffic +- Provide you a trial of AuthZed Dedicated, our private SaaS offering + - During the trial, we can help you make adjustments and optimizations + - If you decide to move forward with AuthZed Dedicated after the trial, you can keep using your trial cluster that we fine-tuned and right sized with you during the trial + - If you decide to self host SpiceDB Enterprise after the trial, we’ll provide you a write up of all of the optimizations we made during the trial and the amount of hardware we used so you can deploy a SpiceDB cluster that has been optimized for your workload into your environment + +If you’d like to schedule some time for a consultation from the Authzed team, please do so [here][call]. + +[scale-test]: https://authzed.com/blog/google-scale-authorization diff --git a/content/spicedb/ops/observability.mdx b/content/spicedb/ops/observability.mdx new file mode 100644 index 0000000..b5ac042 --- /dev/null +++ b/content/spicedb/ops/observability.mdx @@ -0,0 +1,159 @@ +import { Callout, Tabs } from "nextra/components"; +import YouTube from "react-youtube"; + +# Observability Tooling + +In order to operate SpiceDB in a reliable and performant fashion, SpiceDB exposes various forms of [observability] metadata. + +[observability]: https://www.ibm.com/topics/observability + +## Prometheus + +Every SpiceDB [command] has a configurable HTTP server that serves observability data. + +A [Prometheus metrics endpoint][prom-endpoint] can be found on this server at the path `/metrics`. + +Available metrics include operational information about the Go runtime and serving metrics for any servers that are enabled. + +[command]: /spicedb/concepts/commands +[prom-endpoint]: https://prometheus.io/docs/concepts/jobs_instances/ + +## Profiling + +Every SpiceDB [command] has a configurable HTTP server that serves observability data. + +[pprof endpoints][go-pprof] for various types of profiles can be found on this server at the path `/debug/pprof`. + +The types of profiles available are: + +- **cpu**: where a program spends its time while actively consuming CPU cycles +- **heap**: monitor current and historical memory usage, and to check for memory leaks +- **threadcreate**: sections of the program that lead the creation of new OS threads +- **goroutine**: stack traces of all current goroutines +- **block**: where goroutines block waiting on synchronization primitives (including timer channels) +- **mutex**: lock contention + +For example, to download a CPU profile, you can run the following command: + +```sh +go tool pprof 'http://spicedb.local:9090/debug/pprof/profile' +``` + +This will download the profile to `$HOME/pprof` and drop you into a REPL for exploring the data. + +Alternatively, you can upload profiles to [pprof.me][pprofme] to share with others. + +[command]: /spicedb/concepts/commands +[go-pprof]: https://pkg.go.dev/runtime/pprof +[pprofme]: https://pprof.me + +## OpenTelemetry Tracing + +SpiceDB uses [OpenTelemetry][otel] for [tracing] the lifetime of requests. + +You can configure the tracing in SpiceDB via [command flags], prefixed with `otel`. + +Here's a video walking through SpiceDB traces using [Jaeger]: + + + +[otel]: https://opentelemetry.io +[tracing]: https://opentelemetry.io/docs/concepts/signals/traces/ +[command flags]: /spicedb/concepts/commands +[Jaeger]: https://www.jaegertracing.io + +## Structured Logging + +SpiceDB emits logs to [standard streams][stdinouterr] using [zerolog]. + +Logs come in two formats (`console`, `JSON`) and can be configured with the `--log-format` [command flag]. + +If a output device is non-interactive (i.e. not a terminal) these logs are emitted in [NDJSON] by default. + +Here's a comparison of the logs starting up a single SpiceDB v1.25 instance: + + + + ```ansi + 8:00PM INF configured logging async=false format=console log_level=info provider=zerolog + 8:00PM INF configured opentelemetry tracing endpoint= insecure=false provider=none sampleRatio=0.01 service=spicedb v=0 + 8:00PM WRN this version of SpiceDB is out of date. See: https://github.com/authzed/spicedb/releases/tag/v1.26.0 latest-released-version=v1.26.0 this-version=v1.25.0 + 8:00PM INF configuration ClusterDispatchCacheConfig.Enabled=true ClusterDispatchCacheConfig.MaxCost=70% ClusterDispatchCacheConfig.Metrics=true ClusterDispatchCacheConfig.Name=cluster_dispatch ClusterDispatchCacheConfig.NumCounters=100000 Datastore=nil DatastoreConfig.BootstrapFileContents="(map of size 0)" DatastoreConfig.BootstrapFiles=[] DatastoreConfig.BootstrapOverwrite=false DatastoreConfig.BootstrapTimeout=10000 DatastoreConfig.ConnectRate=100 DatastoreConfig.DisableStats=false DatastoreConfig.EnableConnectionBalancing=true DatastoreConfig.EnableDatastoreMetrics=true DatastoreConfig.Engine=memory DatastoreConfig.FollowerReadDelay=4800 DatastoreConfig.GCInterval=180000 DatastoreConfig.GCMaxOperationTime=60000 DatastoreConfig.GCWindow=86400000 DatastoreConfig.LegacyFuzzing=-0.000001 DatastoreConfig.MaxRetries=10 DatastoreConfig.MaxRevisionStalenessPercent=0.1 DatastoreConfig.MigrationPhase=(empty) DatastoreConfig.OverlapKey=key DatastoreConfig.OverlapStrategy=static DatastoreConfig.ReadConnPool.HealthCheckInterval=30000 DatastoreConfig.ReadConnPool.MaxIdleTime=1800000 DatastoreConfig.ReadConnPool.MaxLifetime=1800000 DatastoreConfig.ReadConnPool.MaxLifetimeJitter=0 DatastoreConfig.ReadConnPool.MaxOpenConns=20 DatastoreConfig.ReadConnPool.MinOpenConns=20 DatastoreConfig.ReadOnly=false DatastoreConfig.RequestHedgingEnabled=false DatastoreConfig.RequestHedgingInitialSlowValue=10 DatastoreConfig.RequestHedgingMaxRequests=1000000 DatastoreConfig.RequestHedgingQuantile=0.95 DatastoreConfig.RevisionQuantization=5000 DatastoreConfig.SpannerCredentialsFile=(empty) DatastoreConfig.SpannerEmulatorHost=(empty) DatastoreConfig.TablePrefix=(empty) DatastoreConfig.URI=(empty) DatastoreConfig.WatchBufferLength=1024 DatastoreConfig.WriteConnPool.HealthCheckInterval=30000 DatastoreConfig.WriteConnPool.MaxIdleTime=1800000 DatastoreConfig.WriteConnPool.MaxLifetime=1800000 DatastoreConfig.WriteConnPool.MaxLifetimeJitter=0 DatastoreConfig.WriteConnPool.MaxOpenConns=10 DatastoreConfig.WriteConnPool.MinOpenConns=10 DisableV1SchemaAPI=false DisableVersionResponse=false DispatchCacheConfig.Enabled=true DispatchCacheConfig.MaxCost=30% DispatchCacheConfig.Metrics=true DispatchCacheConfig.Name=dispatch DispatchCacheConfig.NumCounters=10000 DispatchClientMetricsEnabled=true DispatchClientMetricsPrefix=(empty) DispatchClusterMetricsEnabled=true DispatchClusterMetricsPrefix=(empty) DispatchConcurrencyLimits.Check=0 DispatchConcurrencyLimits.LookupResources=0 DispatchConcurrencyLimits.LookupSubjects=0 DispatchConcurrencyLimits.ReachableResources=0 DispatchHashringReplicationFactor=100 DispatchHashringSpread=1 DispatchMaxDepth=50 DispatchServer.Address=:50053 DispatchServer.BufferSize=0 DispatchServer.ClientCAPath=(empty) DispatchServer.Enabled=false DispatchServer.MaxConnAge=30000 DispatchServer.MaxWorkers=0 DispatchServer.Network=tcp DispatchServer.TLSCertPath=(empty) DispatchServer.TLSKeyPath=(empty) DispatchUpstreamAddr=(empty) DispatchUpstreamCAPath=(empty) DispatchUpstreamTimeout=60000 Dispatcher=nil EnableExperimentalWatchableSchemaCache=false GRPCAuthFunc=(value) GRPCServer.Address=:50051 GRPCServer.BufferSize=0 GRPCServer.ClientCAPath=(empty) GRPCServer.Enabled=true GRPCServer.MaxConnAge=30000 GRPCServer.MaxWorkers=0 GRPCServer.Network=tcp GRPCServer.TLSCertPath=(empty) GRPCServer.TLSKeyPath=(empty) GlobalDispatchConcurrencyLimit=50 HTTPGateway.HTTPAddress=:8443 HTTPGateway.HTTPEnabled=false HTTPGateway.HTTPTLSCertPath=(empty) HTTPGateway.HTTPTLSKeyPath=(empty) HTTPGatewayCorsAllowedOrigins=[*] HTTPGatewayCorsEnabled=false HTTPGatewayUpstreamAddr=(empty) HTTPGatewayUpstreamTLSCertPath=(empty) MaxCaveatContextSize=4096 MaxDatastoreReadPageSize=1000 MaxRelationshipContextSize=25000 MaximumPreconditionCount=1000 MaximumUpdatesPerWrite=1000 MetricsAPI.HTTPAddress=:9090 MetricsAPI.HTTPEnabled=true MetricsAPI.HTTPTLSCertPath=(empty) MetricsAPI.HTTPTLSKeyPath=(empty) NamespaceCacheConfig.Enabled=true NamespaceCacheConfig.MaxCost=32MiB NamespaceCacheConfig.Metrics=true NamespaceCacheConfig.Name=namespace NamespaceCacheConfig.NumCounters=1000 PresharedSecureKey=(sensitive) SchemaPrefixesRequired=false ShutdownGracePeriod=0 SilentlyDisableTelemetry=false StreamingAPITimeout=30000 TelemetryCAOverridePath=(empty) TelemetryEndpoint=https://telemetry.authzed.com TelemetryInterval=3600000 V1SchemaAdditiveOnly=false + 8:00PM INF using memory datastore engine + 8:00PM WRN in-memory datastore is not persistent and not feasible to run in a high availability fashion + 8:00PM INF configured namespace cache defaultTTL=0 maxCost="32 MiB" numCounters=1000 + 8:00PM INF datastore driver explicitly asked to skip schema watch datastore-type=*memdb.memdbDatastore + 8:00PM INF configured dispatch cache defaultTTL=20600 maxCost="7.6 GiB" numCounters=10000 + 8:00PM INF configured dispatcher balancerconfig={"loadBalancingConfig":[{"consistent-hashring":{"replicationFactor":100,"spread":1}}]} concurrency-limit-check-permission=50 concurrency-limit-lookup-resources=50 concurrency-limit-lookup-subjects=50 concurrency-limit-reachable-resources=50 + 8:00PM INF grpc server started serving addr=:50051 insecure=true network=tcp service=grpc workers=0 + 8:00PM INF running server datastore=*proxy.observableProxy + 8:00PM INF checking for startable datastore + 8:00PM INF http server started serving addr=:9090 insecure=true service=metrics + 8:00PM INF telemetry reporter scheduled endpoint=https://telemetry.authzed.com interval=1h0m0s next=1m35s + 8:00PM INF received interrupt + 8:00PM INF shutting down + 8:00PM INF http server stopped serving addr=:9090 service=metrics + 8:00PM INF grpc server stopped serving addr=:50051 network=tcp service=grpc + ``` + + + + ```json + {"level":"info","format":"json","log_level":"info","provider":"zerolog","async":false,"time":"2023-11-02T16:48:07-04:00","message":"configured logging"} + {"level":"info","v":0,"provider":"none","endpoint":"","service":"spicedb","insecure":false,"sampleRatio":0.01,"time":"2023-11-02T16:48:07-04:00","message":"configured opentelemetry tracing"} + {"level":"warn","this-version":"v1.25.0","latest-released-version":"v1.26.0","time":"2023-11-02T16:48:07-04:00","message":"this version of SpiceDB is out of date. See: https://github.com/authzed/spicedb/releases/tag/v1.26.0"} + {"level":"info","ClusterDispatchCacheConfig.Enabled":true,"ClusterDispatchCacheConfig.MaxCost":"70%","ClusterDispatchCacheConfig.Metrics":true,"ClusterDispatchCacheConfig.Name":"cluster_dispatch","ClusterDispatchCacheConfig.NumCounters":100000,"Datastore":"nil","DatastoreConfig.BootstrapFileContents":"(map of size 0)","DatastoreConfig.BootstrapFiles":"[]","DatastoreConfig.BootstrapOverwrite":false,"DatastoreConfig.BootstrapTimeout":10000,"DatastoreConfig.ConnectRate":100,"DatastoreConfig.DisableStats":false,"DatastoreConfig.EnableConnectionBalancing":true,"DatastoreConfig.EnableDatastoreMetrics":true,"DatastoreConfig.Engine":"memory","DatastoreConfig.FollowerReadDelay":4800,"DatastoreConfig.GCInterval":180000,"DatastoreConfig.GCMaxOperationTime":60000,"DatastoreConfig.GCWindow":86400000,"DatastoreConfig.LegacyFuzzing":-0.000001,"DatastoreConfig.MaxRetries":10,"DatastoreConfig.MaxRevisionStalenessPercent":0.1,"DatastoreConfig.MigrationPhase":"(empty)","DatastoreConfig.OverlapKey":"key","DatastoreConfig.OverlapStrategy":"static","DatastoreConfig.ReadConnPool.HealthCheckInterval":30000,"DatastoreConfig.ReadConnPool.MaxIdleTime":1800000,"DatastoreConfig.ReadConnPool.MaxLifetime":1800000,"DatastoreConfig.ReadConnPool.MaxLifetimeJitter":0,"DatastoreConfig.ReadConnPool.MaxOpenConns":20,"DatastoreConfig.ReadConnPool.MinOpenConns":20,"DatastoreConfig.ReadOnly":false,"DatastoreConfig.RequestHedgingEnabled":false,"DatastoreConfig.RequestHedgingInitialSlowValue":10,"DatastoreConfig.RequestHedgingMaxRequests":1000000,"DatastoreConfig.RequestHedgingQuantile":0.95,"DatastoreConfig.RevisionQuantization":5000,"DatastoreConfig.SpannerCredentialsFile":"(empty)","DatastoreConfig.SpannerEmulatorHost":"(empty)","DatastoreConfig.TablePrefix":"(empty)","DatastoreConfig.URI":"(empty)","DatastoreConfig.WatchBufferLength":1024,"DatastoreConfig.WriteConnPool.HealthCheckInterval":30000,"DatastoreConfig.WriteConnPool.MaxIdleTime":1800000,"DatastoreConfig.WriteConnPool.MaxLifetime":1800000,"DatastoreConfig.WriteConnPool.MaxLifetimeJitter":0,"DatastoreConfig.WriteConnPool.MaxOpenConns":10,"DatastoreConfig.WriteConnPool.MinOpenConns":10,"DisableV1SchemaAPI":false,"DisableVersionResponse":false,"DispatchCacheConfig.Enabled":true,"DispatchCacheConfig.MaxCost":"30%","DispatchCacheConfig.Metrics":true,"DispatchCacheConfig.Name":"dispatch","DispatchCacheConfig.NumCounters":10000,"DispatchClientMetricsEnabled":true,"DispatchClientMetricsPrefix":"(empty)","DispatchClusterMetricsEnabled":true,"DispatchClusterMetricsPrefix":"(empty)","DispatchConcurrencyLimits.Check":0,"DispatchConcurrencyLimits.LookupResources":0,"DispatchConcurrencyLimits.LookupSubjects":0,"DispatchConcurrencyLimits.ReachableResources":0,"DispatchHashringReplicationFactor":100,"DispatchHashringSpread":1,"DispatchMaxDepth":50,"DispatchServer.Address":":50053","DispatchServer.BufferSize":0,"DispatchServer.ClientCAPath":"(empty)","DispatchServer.Enabled":false,"DispatchServer.MaxConnAge":30000,"DispatchServer.MaxWorkers":0,"DispatchServer.Network":"tcp","DispatchServer.TLSCertPath":"(empty)","DispatchServer.TLSKeyPath":"(empty)","DispatchUpstreamAddr":"(empty)","DispatchUpstreamCAPath":"(empty)","DispatchUpstreamTimeout":60000,"Dispatcher":"nil","EnableExperimentalWatchableSchemaCache":false,"GRPCAuthFunc":"(value)","GRPCServer.Address":":50051","GRPCServer.BufferSize":0,"GRPCServer.ClientCAPath":"(empty)","GRPCServer.Enabled":true,"GRPCServer.MaxConnAge":30000,"GRPCServer.MaxWorkers":0,"GRPCServer.Network":"tcp","GRPCServer.TLSCertPath":"(empty)","GRPCServer.TLSKeyPath":"(empty)","GlobalDispatchConcurrencyLimit":50,"HTTPGateway.HTTPAddress":":8443","HTTPGateway.HTTPEnabled":false,"HTTPGateway.HTTPTLSCertPath":"(empty)","HTTPGateway.HTTPTLSKeyPath":"(empty)","HTTPGatewayCorsAllowedOrigins":"[*]","HTTPGatewayCorsEnabled":false,"HTTPGatewayUpstreamAddr":"(empty)","HTTPGatewayUpstreamTLSCertPath":"(empty)","MaxCaveatContextSize":4096,"MaxDatastoreReadPageSize":1000,"MaxRelationshipContextSize":25000,"MaximumPreconditionCount":1000,"MaximumUpdatesPerWrite":1000,"MetricsAPI.HTTPAddress":":9090","MetricsAPI.HTTPEnabled":true,"MetricsAPI.HTTPTLSCertPath":"(empty)","MetricsAPI.HTTPTLSKeyPath":"(empty)","NamespaceCacheConfig.Enabled":true,"NamespaceCacheConfig.MaxCost":"32MiB","NamespaceCacheConfig.Metrics":true,"NamespaceCacheConfig.Name":"namespace","NamespaceCacheConfig.NumCounters":1000,"PresharedSecureKey":"(sensitive)","SchemaPrefixesRequired":false,"ShutdownGracePeriod":0,"SilentlyDisableTelemetry":false,"StreamingAPITimeout":30000,"TelemetryCAOverridePath":"(empty)","TelemetryEndpoint":"https://telemetry.authzed.com","TelemetryInterval":3600000,"V1SchemaAdditiveOnly":false,"time":"2023-11-02T16:48:07-04:00","message":"configuration"} + {"level":"info","time":"2023-11-02T16:48:07-04:00","message":"using memory datastore engine"} + {"level":"warn","time":"2023-11-02T16:48:07-04:00","message":"in-memory datastore is not persistent and not feasible to run in a high availability fashion"} + {"level":"info","maxCost":"32 MiB","numCounters":1000,"defaultTTL":0,"time":"2023-11-02T16:48:07-04:00","message":"configured namespace cache"} + {"level":"info","datastore-type":"*memdb.memdbDatastore","time":"2023-11-02T16:48:07-04:00","message":"datastore driver explicitly asked to skip schema watch"} + {"level":"info","maxCost":"7.6 GiB","numCounters":10000,"defaultTTL":20600,"time":"2023-11-02T16:48:07-04:00","message":"configured dispatch cache"} + {"level":"info","concurrency-limit-check-permission":50,"concurrency-limit-lookup-resources":50,"concurrency-limit-lookup-subjects":50,"concurrency-limit-reachable-resources":50,"balancerconfig":{"loadBalancingConfig":[{"consistent-hashring":{"replicationFactor":100,"spread":1}}]},"time":"2023-11-02T16:48:07-04:00","message":"configured dispatcher"} + {"level":"info","addr":":50051","network":"tcp","service":"grpc","workers":0,"insecure":true,"time":"2023-11-02T16:48:07-04:00","message":"grpc server started serving"} + {"level":"info","datastore":"*proxy.observableProxy","time":"2023-11-02T16:48:07-04:00","message":"running server"} + {"level":"info","time":"2023-11-02T16:48:07-04:00","message":"checking for startable datastore"} + {"level":"info","addr":":9090","service":"metrics","insecure":true,"time":"2023-11-02T16:48:07-04:00","message":"http server started serving"} + {"level":"info","interval":"1h0m0s","endpoint":"https://telemetry.authzed.com","next":"7s","time":"2023-11-02T16:48:07-04:00","message":"telemetry reporter scheduled"} + {"level":"info","time":"2023-11-02T16:48:08-04:00","message":"received interrupt"} + {"level":"info","time":"2023-11-02T16:48:08-04:00","message":"shutting down"} + {"level":"info","addr":":9090","service":"metrics","time":"2023-11-02T16:48:08-04:00","message":"http server stopped serving"} + {"level":"info","addr":":50051","network":"tcp","service":"grpc","time":"2023-11-02T16:48:08-04:00","message":"grpc server stopped serving"} + ``` + + + +[zerolog]: https://github.com/rs/zerolog +[command flag]: /spicedb/concepts/commands +[stdinouterr]: https://en.wikipedia.org/wiki/Standard_streams +[NDJSON]: https://github.com/ndjson/ndjson-spec + +## Audit Logs + +Audit Logging is functionality exclusive to AuthZed products that publishes logs of SpiceDB API operations to a log sink. + +You can read more about this functionality on the [Audit Logging documentation][audit-logging]. + +[audit-logging]: ../../authzed/concepts/audit-logging + +## Telemetry + +SpiceDB reports metrics that are used to understand how clusters are being configured and the performance they are experiencing. +The intent of collecting this information is to prioritize development that will have the most impact on the community. + + + Telemetry never shares data stored in SpiceDB that may contain anything sensitive. + +Telemetry can always be disabled by providing the flag `--telemetry-endpoint=""`. + + + +[TELEMETRY.md] documents the exact information being collected. + +You can find all of the code in [internal/telemetry]. + +Telemetry is reported via the [Prometheus Remote Write protocol][prom-remote-write]. +Any metrics prefixed with `spicedb_telemetry` are reported hourly to `telemetry.authzed.com`. + +[TELEMETRY.md]: https://github.com/authzed/spicedb/blob/main/TELEMETRY.md +[internal/telemetry]: https://github.com/authzed/spicedb/tree/main/internal/telemetry +[prom-remote-write]: https://prometheus.io/docs/concepts/remote_write_spec/ diff --git a/content/spicedb/ops/operator.mdx b/content/spicedb/ops/operator.mdx new file mode 100644 index 0000000..6437438 --- /dev/null +++ b/content/spicedb/ops/operator.mdx @@ -0,0 +1,251 @@ +# SpiceDB Operator + +The [SpiceDB Operator] is a [Kubernetes Operator] that can manage the installation and lifecycle of SpiceDB clusters. + +SpiceDB is designed not only with cloud-native principles, but also Kubernetes-native principles. +The SpiceDB Operator is the best way to run SpiceDB in production. +Under the hood, all managed AuthZed products leverage the SpiceDB Operator. + +Once the SpiceDB Operator is installed, Kubernetes clusters have a new [Resource] named `SpiceDBCluster`. + +Clusters created with SpiceDBCluster resource have features including: + +- Centralized management for cluster configurations +- Automated SpiceDB upgrades +- Zero-downtime, automated datastore migrations when upgrading SpiceDB + +[SpiceDB Operator]: ../ops/operator +[kubernetes operator]: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/ +[resource]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#custom-resources + +## Configuration + +### Flags + +SpiceDB flags can be set via the `.spec.config` field on the `SpiceDBCluster` object: + +```yaml +apiVersion: authzed.com/v1alpha1 +kind: SpiceDBCluster +metadata: + name: dev +spec: + config: + replicas: 2 + datastoreEngine: cockroachdb + logLevel: debug +``` + +Any CLI flag for SpiceDB can be set in `config` by converting the name of the flag to camelCase and removing dashes. +For example: `--log-level` becomes `logLevel`, `--datastore-engine` becomes `datastoreEngine`, and so on. +The values for these flags are expected to be strings, unless the operator has implemented special support for a specific flag. +This allows the operator to be forward-compatible with new versions of SpiceDB, even if it doesn't know about new features and flags. +There may be exceptions to this rule, but they will be documented in release notes if and when they occur. + +The operator also introduces some new flags that are not present on the CLI: + +| Flag | Description | Type | +| ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------- | +| image | A specific container image to run. | string | +| replicas | The number of nodes to run for this cluster. | string or int | +| skipMigrations | If true, the operator will not run migrations on changes to this cluster. | string or bool | +| tlsSecretName | The name of a Kubernetes secret in the same namespace to use as the TLS credentials for SpiceDB services. | string | +| dispatchUpstreamCASecretName | The name of a Kubernetes secret in the same namespace to use as the TLS CA validation. This should be the CA cert that was used to issue the cert in `tlsSecretName` | string | +| datastoreTLSSecretName | The name of a Kubernetes secret containing a TLS secret to use when connecting to the datastore. | string | +| spannerCredentials | The name of a Kubernetes secret containing credentials for talking to Cloud Spanner. Typically, this would not be used, in favor of workload identity. | string | +| extraPodLabels | A set of additional labels to add to the spicedb pods. | string or map[string]string | +| extraPodAnnotations | A set of additional annotations to add to the spicedb pods. | string or map[string]string | + +All other flags are passed through to SpiceDB without any additional processing. + +### Global Config + +The operator comes with a global config file baked into the image. +This defines what the default is and what SpiceDB images are allowed (the operator will run any image as SpiceDB, but will warn if it is not in this list). + +The file is located at `/opt/operator/config.yaml` in released images, and can be changed with the `--config` flag on the operator. + +Example: + +```yaml +allowedImages: + - ghcr.io/authzed/spicedb + - authzed/spicedb + - quay.io/authzed/spicedb +allowedTags: + - v1.11.0 + - v1.10.0 +disableImageValidation: false +imageName: ghcr.io/authzed/spicedb +imageTag: v1.11.0 +``` + +If `disableImageValidation` is `true`, then the operator will not warn if it is running an image outside the allowed list. + +### Passing Additional Configuration + +You can use the `patches` field on the on the `SpiceDBCluster` to modify the resources the operator creates using [Strategic Merge Patch] patches or with [JSON6902] patch operations. + +[Strategic Merge Patch]: https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ +[JSON6902]: https://www.rfc-editor.org/rfc/rfc6902 + +#### Examples + +Strategic merge patch: + +```yaml +apiVersion: authzed.com/v1alpha1 +kind: SpiceDBCluster +metadata: + name: dev +spec: + config: + datastoreEngine: memory + secretName: dev-spicedb-config + patches: + - kind: Deployment + patch: + metadata: + labels: + added: via-patch + spec: + template: + metadata: + labels: + added: pod-label-via-patch +``` + +Explicit JSON6902 Patch: + +```yaml +apiVersion: authzed.com/v1alpha1 +kind: SpiceDBCluster +metadata: + name: dev +spec: + config: + datastoreEngine: memory + secretName: dev-spicedb-config + patches: + - kind: Deployment + patch: + op: add + path: /metadata/labels + value: + added: via-patch +``` + +You can specify multiple patches for the same object (later in the list are applied over top of earlier in the list): + +```yaml +apiVersion: authzed.com/v1alpha1 +kind: SpiceDBCluster +metadata: + name: dev +spec: + config: + datastoreEngine: memory + secretName: dev-spicedb-config + patches: + - kind: Deployment + patch: + op: add + path: /metadata/labels + value: + added: via-patch + - kind: Deployment + patch: + metadata: + labels: + added-2: via-patch-2 +``` + +Wildcard \* can be used to apply a patch to all resources: + +```yaml +apiVersion: authzed.com/v1alpha1 +kind: SpiceDBCluster +metadata: + name: dev +spec: + config: + datastoreEngine: memory + secretName: dev-spicedb-config + patches: + - kind: "*" + patch: + op: add + path: /metadata/labels + value: + added: via-wildcard-patch +``` + +### Bootstrapping CRDs + +The operator can optionally bootstrap CRDs on start up with `--crds=true`. + +This is not generally recommended; it requires granting CRD create permission to the operator. + +### Static SpiceDBClusters + +The operator can optionally take in a set of "static" SpiceDBClusters that it will create on startup, via `--bootstrap-spicedbs`. +This argument is expected to be a file with a yaml list of SpiceDBCluster objects. + +This is not generally recommended; it is primarily for CD of the operator itself. + +### Debug, Logs, Health, and Metrics + +The operator serves a debug endpoint, health, and metrics from the same address, defined by `--debug-addr` (`:8080` by default): + +- `/metrics` serves a prometheus metric endpoint, which includes controller queue depths and stats about abnormal SpiceDBClusters. +- `/debug/pprof/` for profiling data +- `/healthz` for health + +Log level can be configured with `--v` and accepts standard klog flags. + +## Updating + +### Updating the Operator + +Updating the operator is as simple as re-running: + +```sh +kubectl apply --server-side -k github.com/authzed/spicedb-operator/config +``` + +whenever there is an [update available]. + +[update available]: https://github.com/authzed/spicedb-operator/releases + +### Updating Managed SpiceDBClusters + +The operator supports different strategies for updating SpiceDB clusters, described below. + +If keeping on top of updates sounds daunting, [Authzed Dedicated] provides a simple interface for managing SpiceDB upgrades without the hassle. + +[authzed dedicated]: https://authzed.com/pricing + +#### Automatic Updates + +Every release of SpiceDB Operator comes with a version of SpiceDB that it considers the "default" version. +This default version is used for every `SpiceDBCluster` that does not specify a specific `image` in its `.spec.config`. +When either the operator or its config file is updated, every cluster under management will be rolled to the new default version. + +This is recommended primarily for development environments. + +If you wish to have truly zero-touch upgrades, you can automate the updating of the operator with standard git-ops tooling like [Flux] or [ArgoCD]. +In the future, other update mechanisms may become available. + +[flux]: https://github.com/fluxcd/flux2/ +[argocd]: https://argoproj.github.io/cd/ + +#### Manual upgrades + +If you specify a container image from a [SpiceDB release] in `.spec.config.image`, the cluster will not be updated automatically. +You can choose when and how to update to a new release of SpiceDB, which is recommended for production deployments. + +Each operator release only "knows" about previous SpiceDB releases. +Although the operator attempts to be forward-compatible as much as possible, the guarantees are only best-effort. +We recommend updating SpiceDB operator before updating SpiceDB whenever possible. + +[spicedb release]: https://github.com/authzed/spicedb/releases/ diff --git a/content/spicedb/ops/performance.mdx b/content/spicedb/ops/performance.mdx new file mode 100644 index 0000000..2681c9e --- /dev/null +++ b/content/spicedb/ops/performance.mdx @@ -0,0 +1,93 @@ +import { Callout } from "nextra/components"; + +# Improving Performance + +## By enabling cross-node communication + +SpiceDB can be deployed in a clustered configuration where multiple nodes work together to serve API requests. In such a configuration, and for the CheckPermissions API, enabling a feature called **dispatch** allows nodes to break down one API request into smaller "questions" and forward those to other nodes within the cluster. This helps reduce latency and improve overall performance. + +### How it works + +Each SpiceDB node maintains an in-memory cache of permissions queries it has resolved in the past. When a new permissions query is encountered by one node, its answer may be present on another node, so SpiceDB will forward the request onward to the other node to check the shared cache. + +For more details on how dispatching works, see the [Consistent Hash Load Balancing for gRPC] article. + +[consistent hash load balancing for grpc]: https://authzed.com/blog/consistent-hash-load-balancing-grpc/ + +### Configuration in Kubernetes environments + +If using the [SpiceDB Operator], dispatching is enabled by default and no additional configuration is necessary. + +If not using it, you need to set the following flag: + +```sh +--dispatch-upstream-addr=kubernetes:///spicedb.default:50053 +``` + +where `spicedb.default` is the Kubernetes `Service` in which SpiceDB is accessible. + + + If you are deploying SpiceDB under Kubernetes, it is recommended to use the [SpiceDB Operator], which configures dispatching automatically. + +[spicedb operator]: /spicedb/ops/operator + + + +### Configuration in non-Kubernetes environments + + + Non-Kubernetes based dispatching relies upon DNS updates, which means it can + become stale if DNS is changing. This is not recommended unless DNS updates + are rare. + + +To enable dispatch, the following flags must be specified: + +```sh +spicedb serve \ + --dispatch-cluster-enabled=true \ + --dispatch-upstream-addr=upstream-addr \ + ... +``` + +or via environment variables with the `SPICEDB_` prefix: + +```sh +SPICEDB_DISPATCH_CLUSTER_ENABLED=true \ +SPICEDB_DISPATCH_UPSTREAM_ADDR=upstream-addr \ +spicedb serve ... +``` + +The `upstream-addr` should be the DNS address of the load balancer at which _all_ SpiceDB nodes are accessible at the default dispatch port of `:50053`. + +## By enabling Materialize + +[Materialize] is a separate service that allows for the precomputation of permission query results. + +If Materialize is running, SpiceDB can dispatch sub-queries to Materialize, which can significantly speed up permission checks. + +[Materialize]: /authzed/concepts/authzed-materialize + +## By enabling the schema cache + +The schema cache stores type definitions and caveat definitions to avoid repeatedly fetching schema information from the datastore. + +SpiceDB offers two caching modes: + +1. **Just-In-Time (JIT) Caching**: The default mode that loads definitions on-demand. Uses less memory, but it incurs a cold start penalty on first access to each definition. +2. **Watching Cache**: An experimental mode that proactively maintains an always-up-to-date cache. This mode uses more memory but avoids cold start penalties. It is recommended when there are frequent schema changes. + +To configure the schema cache, use the following flags: + +```bash +# Enable namespace cache (default: true) +--ns-cache-enabled=true + +# Maximum memory (default: 32 MiB) +--ns-cache-max-cost=32MiB + +# Enable experimental watchable schema cache (default: false) +# When true: uses watching cache if datastore supports it +# When false: always uses JIT caching +--enable-experimental-watchable-schema-cache=false +``` diff --git a/content/spicedb/ops/secure-rag-pipelines.mdx b/content/spicedb/ops/secure-rag-pipelines.mdx new file mode 100644 index 0000000..b00551a --- /dev/null +++ b/content/spicedb/ops/secure-rag-pipelines.mdx @@ -0,0 +1,74 @@ +import JupyterNotebookViewer from "@/components/JupyterNotebookViewer"; + +# Secure Your RAG Pipelines With Fine Grained Authorization + +Here's how you can use SpiceDB to safeguard sensitive data in RAG pipelines. +You will learn how to pre-filter and post-filter vector database queries with a list of authorized object IDs to improve security and efficiency. + +This guide uses OpenAI, Pinecone, LangChain, Jupyter Notebook and SpiceDB + +## Why is this important? + +Building enterprise-ready AI poses challenges around data security, accuracy, scalability, and integration, especially in compliance-regulated industries like healthcare and finance. +Firms are increasing efforts to mitigate risks associated with LLMs, particularly regarding sensitive data exfiltration of personally identifiable information and/or sensitive company data. +The primary mitigation strategy is to build guardrails around Retrieval-Augmented Generation (RAG) to safeguard data while also optimizing query response quality and efficiency. + +To enable precise guardrails, one must implement permissions systems with advanced fine grained authorization capabilities such as returning lists of authorized subjects and accessible resources. +Such systems ensure timely access to authorized data while preventing exfiltration of sensitive information, making RAGs more efficient and improving performance at scale. + +## Setup and Prerequisites + +- Access to a [SpiceDB](https://authzed.com/spicedb) instance. + You can find instructions for installing SpiceDB [here](https://authzed.com/docs/spicedb/getting-started/install/macos) +- A [Pinecone account](https://www.pinecone.io/) and API key +- An [OpenAI Platform account](https://platform.openai.com/docs/overview) and API key +- [Jupyter Notebook](https://jupyter.org/) running locally + +### Running SpiceDB + +Once you've installed SpiceDB, run a local instance with this command in your terminal: + +`spicedb serve --grpc-preshared-key rag-rebac-walkthrough` + +and you should see something like this that indicates an instance of SpiceDB is running locally: + +``` +8:28PM INF configured logging async=false format=auto log_level=inf +o provider=zerolog +8:28PM INF GOMEMLIMIT is updated GOMEMLIMIT=25769803776 package=git +hub.com/KimMachineGun/automemlimit/memlimit +8:28PM INF configured opentelemetry tracing endpoint= insecure=fals +e provider=none sampleRatio=0.01 service=spicedb v=0 +8:28PM WRN this version of SpiceDB is out of date. See: https://git +hub.com/authzed/spicedb/releases/tag/v1.39.1 latest-released-versio +n=v1.39.1 this-version=v1.37.2 +8:28PM INF configuration ClusterDispatchCacheConfig.CacheKindForTes +ting=(empty) ClusterDispatchCacheConfig.Enabled=true ClusterDispatc +8:28PM INF using memory datastore engine +8:28PM WRN in-memory datastore is not persistent and not feasible t +8:28PM INF configured namespace cache defaultTTL=0 maxCost="32 MiB" +8:28PM INF schema watch explicitly disabled +8:28PM INF configured dispatch cache defaultTTL=20600 maxCost="164 +8:28PM INF configured dispatcher balancerconfig={"loadBalancingConfig":[{"consistent-hashring":{"replicationFactor":100,"spread":1}}]} concurrency-limit-check-permission=50 concurrency-limit-lookup-resources=50 concurrency-limit-lookup-subjects=50 concurrency-limit-reachable-resources=50 +8:28PM INF grpc server started serving addr=:50051 insecure=true network=tcp service=grpc workers=0 +8:28PM INF running server datastore=*schemacaching.definitionCachingProxy +8:28PM INF http server started serving addr=:9090 insecure=true service=metrics +8:28PM INF telemetry reporter scheduled endpoint=https://telemetry.authzed.com interval=1h0m0s next=5m14s +``` + +#### Download the Jupyter Notebook + +Clone the `workshops` [repository](https://github.com/authzed/workshops/) to your system and type `cd secure-rag-pipelines` to enter the working directory. + +Start the `01-rag.ipynb` Notebook locally by typing `jupyter 01-rag.ipynb` (or `python3 -m notebook`) in your terminal. + +## Add Fine Grained Authorization + +Here's the Jupyter Notebook with step-by-step instructions + + + +## Using DeepSeek (or other LLMs) + +If you want to replace the OpenAI LLM with the DeepSeek (or any other) LLM, [check out this branch](https://github.com/authzed/workshops/tree/deepseek). +It follows similar steps as the above guide, but uses the DeepSeek LLM via [OpenRouter](https://openrouter.ai/) diff --git a/globals.css b/globals.css deleted file mode 100644 index 2e87060..0000000 --- a/globals.css +++ /dev/null @@ -1,35 +0,0 @@ -@tailwind base; -@tailwind components; -@tailwind utilities; - -body { - font-feature-settings: - "rlig" 1, - "calt" 1; -} - -/* https://github.com/tjallingt/react-youtube/issues/242 */ -.youtubeContainer { - position: relative; - width: 100%; - height: 0; - padding-bottom: 56.25%; - overflow: hidden; - margin-bottom: 50px; -} - -.youtubeContainer iframe { - width: 100%; - height: 100%; - position: absolute; - top: 0; - left: 0; -} - -.swagger-ui .information-container { - display: none; -} - -.swagger-ui .scheme-container { - display: none; -} diff --git a/mdx-components.ts b/mdx-components.ts new file mode 100644 index 0000000..be597ef --- /dev/null +++ b/mdx-components.ts @@ -0,0 +1,9 @@ +import { useMDXComponents as getDocsMDXComponents } from 'nextra-theme-docs' +import type { Component } from 'react' + +const docsComponents = getDocsMDXComponents() + +export const useMDXComponents = (components?: Component) => ({ + ...docsComponents, + ...components +}) diff --git a/next-env.d.ts b/next-env.d.ts index 52e831b..36a4fe4 100644 --- a/next-env.d.ts +++ b/next-env.d.ts @@ -1,5 +1,7 @@ /// /// +/// +/// // NOTE: This file should not be edited -// see https://nextjs.org/docs/pages/api-reference/config/typescript for more information. +// see https://nextjs.org/docs/app/api-reference/config/typescript for more information. diff --git a/next.config.mjs b/next.config.mjs index 551a36b..27c0a28 100644 --- a/next.config.mjs +++ b/next.config.mjs @@ -23,8 +23,7 @@ const textProtoGrammar = JSON.parse( ); const withNextra = nextra({ - theme: "nextra-theme-docs", - themeConfig: "./theme.config.tsx", + contentDirBasePath: '/docs', latex: true, search: { codeblocks: false }, defaultShowCopyCode: true, @@ -60,6 +59,10 @@ const withNextra = nextra({ export default withNextra({ basePath: process.env.NEXT_PUBLIC_BASE_DIR ?? undefined, + // NOTE: when you're using the `content` dir approach with nextra, + // you need this setting to make it so that static HTML is generated + // during the build step. This is also what enables pagefind to work. + output: "export", assetPrefix: process.env.VERCEL_ENV === "production" ? "https://docs-authzed.vercel.app/docs" diff --git a/package.json b/package.json index 495921b..976db54 100644 --- a/package.json +++ b/package.json @@ -8,6 +8,7 @@ "postbuild": "./scripts/postbuild.sh", "start": "next start", "lint:markdown": "markdownlint-cli2", + "gen:pagefind": "node --experimental-strip-types scripts/buildSearchIndex.mts", "format:check": "prettier -c .", "format": "prettier -w ." }, @@ -20,43 +21,40 @@ "@fortawesome/free-brands-svg-icons": "^6.5.2", "@fortawesome/free-solid-svg-icons": "^6.5.2", "@fortawesome/react-fontawesome": "^0.2.2", - "@headlessui/react": "^1.7.19", - "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-slot": "^1.2.4", "@segment/in-eu": "^0.4.0", "@svgr/webpack": "^8.1.0", "@vercel/speed-insights": "^1.0.12", - "class-variance-authority": "^0.7.0", + "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "escape-string-regexp": "^5.0.0", - "flexsearch": "^0.8.143", "js-yaml": "^4.1.0", "next": "^15.5.7", "next-sitemap": "^4.2.3", - "nextra": "^3", - "nextra-theme-docs": "^3", + "nextra": "^4.6.0", + "nextra-theme-docs": "^4.6.0", "posthog-js": "^1.223.5", "react": "^18.3.1", "react-dom": "^18.3.1", "react-youtube": "^10.1.0", "sharp": "^0.34.0", "swagger-ui-react": "^5.30.2", - "tailwind-merge": "^2.4.0" + "tailwind-merge": "^3.4.0", + "tailwindcss": "^4.1.17" }, "devDependencies": { - "@tailwindcss/line-clamp": "^0.4.4", - "@tailwindcss/typography": "^0.5.13", + "@tailwindcss/postcss": "^4.1.17", "@types/flexsearch": "^0.7.6", "@types/js-yaml": "^4.0.9", "@types/node": "22.15.29", "@types/react": "^19.1.6", - "autoprefixer": "^10.4.19", "markdownlint-cli2": "^0.13.0", "markdownlint-rule-max-one-sentence-per-line": "^0.0.2", - "postcss": "^8.4.39", + "pagefind": "^1.4.0", + "postcss": "^8.5.6", "prettier": "^3.6.2", - "shiki": "^1.29.2", - "tailwindcss": "^3.4.4", - "typescript": "^4.9.5", + "shiki": "^3.15.0", + "typescript": "^5.9.3", "yaml-loader": "^0.8.1" }, "packageManager": "pnpm@10.17.1" diff --git a/pagefind.log b/pagefind.log new file mode 100644 index 0000000..be78fc8 --- /dev/null +++ b/pagefind.log @@ -0,0 +1,28 @@ +Pagefind logging initialized +[Status]: Running Pagefind v1.4.0 (Extended) +[Info]: Running in verbose mode +[Info]: Running from: "/home/tstirrat/authzed/docs" +[Info]: Source: "out" +[Info]: Output: "public/_pagefind" +[Status]: [Walking source directory] +[Info]: Found 68 files matching **/*.{html} +[Status]: [Parsing files] +[Info]: Found a data-pagefind-body element on the site. +↳ Ignoring pages without this tag. +[Status]: [Reading languages] +[Info]: Discovered 1 language: en +[Info]: * en: 57 pages +[Status]: [Building search indexes] +[Info]: Language en: + Indexed 57 pages + Indexed 2945 words + Indexed 0 filters + Indexed 0 sorts + +[Info]: Total: + Indexed 1 language + Indexed 57 pages + Indexed 2945 words + Indexed 0 filters + Indexed 0 sorts +[Status]: Finished in 0.306 seconds diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index cee3b54..d148189 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -20,33 +20,27 @@ importers: '@fortawesome/react-fontawesome': specifier: ^0.2.2 version: 0.2.2(@fortawesome/fontawesome-svg-core@6.5.2)(react@18.3.1) - '@headlessui/react': - specifier: ^1.7.19 - version: 1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-slot': - specifier: ^1.1.0 - version: 1.1.0(@types/react@19.1.6)(react@18.3.1) + specifier: ^1.2.4 + version: 1.2.4(@types/react@19.1.6)(react@18.3.1) '@segment/in-eu': specifier: ^0.4.0 version: 0.4.0 '@svgr/webpack': specifier: ^8.1.0 - version: 8.1.0(typescript@4.9.5) + version: 8.1.0(typescript@5.9.3) '@vercel/speed-insights': specifier: ^1.0.12 version: 1.0.12(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) class-variance-authority: - specifier: ^0.7.0 - version: 0.7.0 + specifier: ^0.7.1 + version: 0.7.1 clsx: specifier: ^2.1.1 version: 2.1.1 escape-string-regexp: specifier: ^5.0.0 version: 5.0.0 - flexsearch: - specifier: ^0.8.143 - version: 0.8.158 js-yaml: specifier: ^4.1.0 version: 4.1.0 @@ -57,11 +51,11 @@ importers: specifier: ^4.2.3 version: 4.2.3(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) nextra: - specifier: ^3 - version: 3.3.1(@types/react@19.1.6)(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@4.9.5) + specifier: ^4.6.0 + version: 4.6.0(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.9.3) nextra-theme-docs: - specifier: ^3 - version: 3.3.1(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(nextra@3.3.1(@types/react@19.1.6)(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@4.9.5))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: ^4.6.0 + version: 4.6.0(@types/react@19.1.6)(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(nextra@4.6.0(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.9.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(use-sync-external-store@1.6.0(react@18.3.1)) posthog-js: specifier: ^1.223.5 version: 1.223.5(@rrweb/types@2.0.0-alpha.17) @@ -81,15 +75,15 @@ importers: specifier: ^5.30.2 version: 5.30.2(@types/react@19.1.6)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) tailwind-merge: - specifier: ^2.4.0 - version: 2.4.0 + specifier: ^3.4.0 + version: 3.4.0 + tailwindcss: + specifier: ^4.1.17 + version: 4.1.17 devDependencies: - '@tailwindcss/line-clamp': - specifier: ^0.4.4 - version: 0.4.4(tailwindcss@3.4.4) - '@tailwindcss/typography': - specifier: ^0.5.13 - version: 0.5.13(tailwindcss@3.4.4) + '@tailwindcss/postcss': + specifier: ^4.1.17 + version: 4.1.17 '@types/flexsearch': specifier: ^0.7.6 version: 0.7.6 @@ -102,30 +96,27 @@ importers: '@types/react': specifier: ^19.1.6 version: 19.1.6 - autoprefixer: - specifier: ^10.4.19 - version: 10.4.19(postcss@8.4.39) markdownlint-cli2: specifier: ^0.13.0 version: 0.13.0 markdownlint-rule-max-one-sentence-per-line: specifier: ^0.0.2 version: 0.0.2 + pagefind: + specifier: ^1.4.0 + version: 1.4.0 postcss: - specifier: ^8.4.39 - version: 8.4.39 + specifier: ^8.5.6 + version: 8.5.6 prettier: specifier: ^3.6.2 version: 3.6.2 shiki: - specifier: ^1.29.2 - version: 1.29.2 - tailwindcss: - specifier: ^3.4.4 - version: 3.4.4 + specifier: ^3.15.0 + version: 3.15.0 typescript: - specifier: ^4.9.5 - version: 4.9.5 + specifier: ^5.9.3 + version: 5.9.3 yaml-loader: specifier: ^0.8.1 version: 0.8.1 @@ -838,8 +829,8 @@ packages: '@floating-ui/utils@0.2.10': resolution: {integrity: sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==} - '@formatjs/intl-localematcher@0.5.10': - resolution: {integrity: sha512-af3qATX+m4Rnd9+wHcjJ4w2ijq+rAVP3CCinJQvFv1kgSu1W6jypUmvleJxcewdxmutM8dmIRZFxO/IQBZmP2Q==} + '@formatjs/intl-localematcher@0.6.2': + resolution: {integrity: sha512-XOMO2Hupl0wdd172Y06h6kLpBz6Dv+J4okPLl4LPtzbr8f66WbIoy4ev98EBuZ6ZK4h5ydTN6XneT4QVpD7cdA==} '@fortawesome/fontawesome-common-types@6.5.2': resolution: {integrity: sha512-gBxPg3aVO6J0kpfHNILc+NMhXnqHumFxOmjYCFfOiLZfwhnnfhtsdA2hfJlDnj+8PjAs6kKQPenOTKj3Rf7zHw==} @@ -863,13 +854,6 @@ packages: '@fortawesome/fontawesome-svg-core': ~1 || ~6 react: '>=16.3' - '@headlessui/react@1.7.19': - resolution: {integrity: sha512-Ll+8q3OlMJfJbAKM/+/Y2q6PPYbryqNTXDbryx7SXLIDamkF6iQFbriYHga0dY44PvDhvvBWCx1Xj4U5+G4hOw==} - engines: {node: '>=10'} - peerDependencies: - react: ^16 || ^17 || ^18 - react-dom: ^16 || ^17 || ^18 - '@headlessui/react@2.2.9': resolution: {integrity: sha512-Mb+Un58gwBn0/yWZfyrCh0TJyurtT+dETj7YHleylHk5od3dv2XqETPGWMyQ5/7sYN7oWdyM1u9MvC0OC8UmzQ==} engines: {node: '>=10'} @@ -1130,14 +1114,21 @@ packages: cpu: [x64] os: [win32] - '@isaacs/cliui@8.0.2': - resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} - engines: {node: '>=12'} + '@isaacs/balanced-match@4.0.1': + resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==} + engines: {node: 20 || >=22} + + '@isaacs/brace-expansion@5.0.0': + resolution: {integrity: sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==} + engines: {node: 20 || >=22} '@jridgewell/gen-mapping@0.3.5': resolution: {integrity: sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==} engines: {node: '>=6.0.0'} + '@jridgewell/remapping@2.3.5': + resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==} + '@jridgewell/resolve-uri@3.1.2': resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} engines: {node: '>=6.0.0'} @@ -1149,107 +1140,110 @@ packages: '@jridgewell/sourcemap-codec@1.4.15': resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + '@jridgewell/trace-mapping@0.3.25': resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} '@mdx-js/mdx@3.1.1': resolution: {integrity: sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==} - '@mdx-js/react@3.1.1': - resolution: {integrity: sha512-f++rKLQgUVYDAtECQ6fn/is15GkEH9+nZPM3MS0RcxVqoTfawHvDlSCH7JbMhAM6uJ32v3eXLvLmLvjGu7PTQw==} - peerDependencies: - '@types/react': '>=16' - react: '>=16' - '@mermaid-js/parser@0.6.3': resolution: {integrity: sha512-lnjOhe7zyHjc+If7yT4zoedx2vo4sHaTmtkl1+or8BRTnCtDmcTpAjpzDSfCZrshM5bCoz0GyidzadJAH1xobA==} - '@napi-rs/simple-git-android-arm-eabi@0.1.19': - resolution: {integrity: sha512-XryEH/hadZ4Duk/HS/HC/cA1j0RHmqUGey3MsCf65ZS0VrWMqChXM/xlTPWuY5jfCc/rPubHaqI7DZlbexnX/g==} + '@napi-rs/simple-git-android-arm-eabi@0.1.22': + resolution: {integrity: sha512-JQZdnDNm8o43A5GOzwN/0Tz3CDBQtBUNqzVwEopm32uayjdjxev1Csp1JeaqF3v9djLDIvsSE39ecsN2LhCKKQ==} engines: {node: '>= 10'} cpu: [arm] os: [android] - '@napi-rs/simple-git-android-arm64@0.1.19': - resolution: {integrity: sha512-ZQ0cPvY6nV9p7zrR9ZPo7hQBkDAcY/CHj3BjYNhykeUCiSNCrhvwX+WEeg5on8M1j4d5jcI/cwVG2FslfiByUg==} + '@napi-rs/simple-git-android-arm64@0.1.22': + resolution: {integrity: sha512-46OZ0SkhnvM+fapWjzg/eqbJvClxynUpWYyYBn4jAj7GQs1/Yyc8431spzDmkA8mL0M7Xo8SmbkzTDE7WwYAfg==} engines: {node: '>= 10'} cpu: [arm64] os: [android] - '@napi-rs/simple-git-darwin-arm64@0.1.19': - resolution: {integrity: sha512-viZB5TYgjA1vH+QluhxZo0WKro3xBA+1xSzYx8mcxUMO5gnAoUMwXn0ZO/6Zy6pai+aGae+cj6XihGnrBRu3Pg==} + '@napi-rs/simple-git-darwin-arm64@0.1.22': + resolution: {integrity: sha512-zH3h0C8Mkn9//MajPI6kHnttywjsBmZ37fhLX/Fiw5XKu84eHA6dRyVtMzoZxj6s+bjNTgaMgMUucxPn9ktxTQ==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@napi-rs/simple-git-darwin-x64@0.1.19': - resolution: {integrity: sha512-6dNkzSNUV5X9rsVYQbpZLyJu4Gtkl2vNJ3abBXHX/Etk0ILG5ZasO3ncznIANZQpqcbn/QPHr49J2QYAXGoKJA==} + '@napi-rs/simple-git-darwin-x64@0.1.22': + resolution: {integrity: sha512-GZN7lRAkGKB6PJxWsoyeYJhh85oOOjVNyl+/uipNX8bR+mFDCqRsCE3rRCFGV9WrZUHXkcuRL2laIRn7lLi3ag==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@napi-rs/simple-git-freebsd-x64@0.1.19': - resolution: {integrity: sha512-sB9krVIchzd20FjI2ZZ8FDsTSsXLBdnwJ6CpeVyrhXHnoszfcqxt49ocZHujAS9lMpXq7i2Nv1EXJmCy4KdhwA==} + '@napi-rs/simple-git-freebsd-x64@0.1.22': + resolution: {integrity: sha512-xyqX1C5I0WBrUgZONxHjZH5a4LqQ9oki3SKFAVpercVYAcx3pq6BkZy1YUOP4qx78WxU1CCNfHBN7V+XO7D99A==} engines: {node: '>= 10'} cpu: [x64] os: [freebsd] - '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.19': - resolution: {integrity: sha512-6HPn09lr9N1n5/XKfP8Np53g4fEXVxOFqNkS6rTH3Rm1lZHdazTRH62RggXLTguZwjcE+MvOLvoTIoR5kAS8+g==} + '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.22': + resolution: {integrity: sha512-4LOtbp9ll93B9fxRvXiUJd1/RM3uafMJE7dGBZGKWBMGM76+BAcCEUv2BY85EfsU/IgopXI6n09TycRfPWOjxA==} engines: {node: '>= 10'} cpu: [arm] os: [linux] - '@napi-rs/simple-git-linux-arm64-gnu@0.1.19': - resolution: {integrity: sha512-G0gISckt4cVDp3oh5Z6PV3GHJrJO6Z8bIS+9xA7vTtKdqB1i5y0n3cSFLlzQciLzhr+CajFD27doW4lEyErQ/Q==} + '@napi-rs/simple-git-linux-arm64-gnu@0.1.22': + resolution: {integrity: sha512-GVOjP/JjCzbQ0kSqao7ctC/1sodVtv5VF57rW9BFpo2y6tEYPCqHnkQkTpieuwMNe+TVOhBUC1+wH0d9/knIHg==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/simple-git-linux-arm64-musl@0.1.19': - resolution: {integrity: sha512-OwTRF+H4IZYxmDFRi1IrLMfqbdIpvHeYbJl2X94NVsLVOY+3NUHvEzL3fYaVx5urBaMnIK0DD3wZLbcueWvxbA==} + '@napi-rs/simple-git-linux-arm64-musl@0.1.22': + resolution: {integrity: sha512-MOs7fPyJiU/wqOpKzAOmOpxJ/TZfP4JwmvPad/cXTOWYwwyppMlXFRms3i98EU3HOazI/wMU2Ksfda3+TBluWA==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/simple-git-linux-powerpc64le-gnu@0.1.19': - resolution: {integrity: sha512-p7zuNNVyzpRvkCt2RIGv9FX/WPcPbZ6/FRUgUTZkA2WU33mrbvNqSi4AOqCCl6mBvEd+EOw5NU4lS9ORRJvAEg==} + '@napi-rs/simple-git-linux-ppc64-gnu@0.1.22': + resolution: {integrity: sha512-L59dR30VBShRUIZ5/cQHU25upNgKS0AMQ7537J6LCIUEFwwXrKORZKJ8ceR+s3Sr/4jempWVvMdjEpFDE4HYww==} engines: {node: '>= 10'} - cpu: [powerpc64le] + cpu: [ppc64] os: [linux] - '@napi-rs/simple-git-linux-s390x-gnu@0.1.19': - resolution: {integrity: sha512-6N2vwJUPLiak8GLrS0a3is0gSb0UwI2CHOOqtvQxPmv+JVI8kn3vKiUscsktdDb0wGEPeZ8PvZs0y8UWix7K4g==} + '@napi-rs/simple-git-linux-s390x-gnu@0.1.22': + resolution: {integrity: sha512-4FHkPlCSIZUGC6HiADffbe6NVoTBMd65pIwcd40IDbtFKOgFMBA+pWRqKiQ21FERGH16Zed7XHJJoY3jpOqtmQ==} engines: {node: '>= 10'} cpu: [s390x] os: [linux] - '@napi-rs/simple-git-linux-x64-gnu@0.1.19': - resolution: {integrity: sha512-61YfeO1J13WK7MalLgP3QlV6of2rWnVw1aqxWkAgy/lGxoOFSJ4Wid6ANVCEZk4tJpPX/XNeneqkUz5xpeb2Cw==} + '@napi-rs/simple-git-linux-x64-gnu@0.1.22': + resolution: {integrity: sha512-Ei1tM5Ho/dwknF3pOzqkNW9Iv8oFzRxE8uOhrITcdlpxRxVrBVptUF6/0WPdvd7R9747D/q61QG/AVyWsWLFKw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/simple-git-linux-x64-musl@0.1.19': - resolution: {integrity: sha512-cCTWNpMJnN3PrUBItWcs3dQKCydsIasbrS3laMzq8k7OzF93Zrp2LWDTPlLCO9brbBVpBzy2Qk5Xg9uAfe/Ukw==} + '@napi-rs/simple-git-linux-x64-musl@0.1.22': + resolution: {integrity: sha512-zRYxg7it0p3rLyEJYoCoL2PQJNgArVLyNavHW03TFUAYkYi5bxQ/UFNVpgxMaXohr5yu7qCBqeo9j4DWeysalg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/simple-git-win32-arm64-msvc@0.1.19': - resolution: {integrity: sha512-sWavb1BjeLKKBA+PbTsRSSzVNfb7V/dOpaJvkgR5d2kWFn/AHmCZHSSj/3nyZdYf0BdDC+DIvqk3daAEZ6QMVw==} + '@napi-rs/simple-git-win32-arm64-msvc@0.1.22': + resolution: {integrity: sha512-XGFR1fj+Y9cWACcovV2Ey/R2xQOZKs8t+7KHPerYdJ4PtjVzGznI4c2EBHXtdOIYvkw7tL5rZ7FN1HJKdD5Quw==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@napi-rs/simple-git-win32-x64-msvc@0.1.19': - resolution: {integrity: sha512-FmNuPoK4+qwaSCkp8lm3sJlrxk374enW+zCE5ZksXlZzj/9BDJAULJb5QUJ7o9Y8A/G+d8LkdQLPBE2Jaxe5XA==} + '@napi-rs/simple-git-win32-ia32-msvc@0.1.22': + resolution: {integrity: sha512-Gqr9Y0gs6hcNBA1IXBpoqTFnnIoHuZGhrYqaZzEvGMLrTrpbXrXVEtX3DAAD2RLc1b87CPcJ49a7sre3PU3Rfw==} + engines: {node: '>= 10'} + cpu: [ia32] + os: [win32] + + '@napi-rs/simple-git-win32-x64-msvc@0.1.22': + resolution: {integrity: sha512-hQjcreHmUcpw4UrtkOron1/TQObfe484lxiXFLLUj7aWnnnOVs1mnXq5/Bo9+3NYZldFpFRJPdPBeHCisXkKJg==} engines: {node: '>= 10'} cpu: [x64] os: [win32] - '@napi-rs/simple-git@0.1.19': - resolution: {integrity: sha512-jMxvwzkKzd3cXo2EB9GM2ic0eYo2rP/BS6gJt6HnWbsDO1O8GSD4k7o2Cpr2YERtMpGF/MGcDfsfj2EbQPtrXw==} + '@napi-rs/simple-git@0.1.22': + resolution: {integrity: sha512-bMVoAKhpjTOPHkW/lprDPwv5aD4R4C3Irt8vn+SKA9wudLe9COLxOhurrKRsxmZccUbWXRF7vukNeGUAj5P8kA==} engines: {node: '>= 10'} '@next/env@13.5.6': @@ -1318,12 +1312,38 @@ packages: resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} engines: {node: '>= 8'} - '@pkgjs/parseargs@0.11.0': - resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} - engines: {node: '>=14'} + '@pagefind/darwin-arm64@1.4.0': + resolution: {integrity: sha512-2vMqkbv3lbx1Awea90gTaBsvpzgRs7MuSgKDxW0m9oV1GPZCZbZBJg/qL83GIUEN2BFlY46dtUZi54pwH+/pTQ==} + cpu: [arm64] + os: [darwin] + + '@pagefind/darwin-x64@1.4.0': + resolution: {integrity: sha512-e7JPIS6L9/cJfow+/IAqknsGqEPjJnVXGjpGm25bnq+NPdoD3c/7fAwr1OXkG4Ocjx6ZGSCijXEV4ryMcH2E3A==} + cpu: [x64] + os: [darwin] + + '@pagefind/freebsd-x64@1.4.0': + resolution: {integrity: sha512-WcJVypXSZ+9HpiqZjFXMUobfFfZZ6NzIYtkhQ9eOhZrQpeY5uQFqNWLCk7w9RkMUwBv1HAMDW3YJQl/8OqsV0Q==} + cpu: [x64] + os: [freebsd] + + '@pagefind/linux-arm64@1.4.0': + resolution: {integrity: sha512-PIt8dkqt4W06KGmQjONw7EZbhDF+uXI7i0XtRLN1vjCUxM9vGPdtJc2mUyVPevjomrGz5M86M8bqTr6cgDp1Uw==} + cpu: [arm64] + os: [linux] + + '@pagefind/linux-x64@1.4.0': + resolution: {integrity: sha512-z4oddcWwQ0UHrTHR8psLnVlz6USGJ/eOlDPTDYZ4cI8TK8PgwRUPQZp9D2iJPNIPcS6Qx/E4TebjuGJOyK8Mmg==} + cpu: [x64] + os: [linux] + + '@pagefind/windows-x64@1.4.0': + resolution: {integrity: sha512-NkT+YAdgS2FPCn8mIA9bQhiBs+xmniMGq1LFPDhcFn0+2yIUEiIG06t7bsZlhdjknEQRTSdT7YitP6fC5qwP0g==} + cpu: [x64] + os: [win32] - '@radix-ui/react-compose-refs@1.1.0': - resolution: {integrity: sha512-b4inOtiaOnYf9KWyO3jAeeCG6FeyfY6ldiEPanbUjWd+xIk5wZeHa8yVwmrJ2vderhu/BQvzCrJI0lHd+wIiqw==} + '@radix-ui/react-compose-refs@1.1.2': + resolution: {integrity: sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==} peerDependencies: '@types/react': '*' react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc @@ -1331,8 +1351,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-slot@1.1.0': - resolution: {integrity: sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==} + '@radix-ui/react-slot@1.2.4': + resolution: {integrity: sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==} peerDependencies: '@types/react': '*' react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc @@ -1386,26 +1406,28 @@ packages: '@segment/in-eu@0.4.0': resolution: {integrity: sha512-4JM6fMRMy1nZQk5x1nB1R4iIFOkcyCiUNOPQo161nUUsNE+U97l46v4xLIqQJAMjXjfxnULFFp+9uf6PYdnLtQ==} - '@shikijs/core@1.29.2': - resolution: {integrity: sha512-vju0lY9r27jJfOY4Z7+Rt/nIOjzJpZ3y+nYpqtUZInVoXQ/TJZcfGnNOGnKjFdVZb8qexiCuSlZRKcGfhhTTZQ==} + '@shikijs/core@3.15.0': + resolution: {integrity: sha512-8TOG6yG557q+fMsSVa8nkEDOZNTSxjbbR8l6lF2gyr6Np+jrPlslqDxQkN6rMXCECQ3isNPZAGszAfYoJOPGlg==} - '@shikijs/engine-javascript@1.29.2': - resolution: {integrity: sha512-iNEZv4IrLYPv64Q6k7EPpOCE/nuvGiKl7zxdq0WFuRPF5PAE9PRo2JGq/d8crLusM59BRemJ4eOqrFrC4wiQ+A==} + '@shikijs/engine-javascript@3.15.0': + resolution: {integrity: sha512-ZedbOFpopibdLmvTz2sJPJgns8Xvyabe2QbmqMTz07kt1pTzfEvKZc5IqPVO/XFiEbbNyaOpjPBkkr1vlwS+qg==} - '@shikijs/engine-oniguruma@1.29.2': - resolution: {integrity: sha512-7iiOx3SG8+g1MnlzZVDYiaeHe7Ez2Kf2HrJzdmGwkRisT7r4rak0e655AcM/tF9JG/kg5fMNYlLLKglbN7gBqA==} + '@shikijs/engine-oniguruma@3.15.0': + resolution: {integrity: sha512-HnqFsV11skAHvOArMZdLBZZApRSYS4LSztk2K3016Y9VCyZISnlYUYsL2hzlS7tPqKHvNqmI5JSUJZprXloMvA==} - '@shikijs/langs@1.29.2': - resolution: {integrity: sha512-FIBA7N3LZ+223U7cJDUYd5shmciFQlYkFXlkKVaHsCPgfVLiO+e12FmQE6Tf9vuyEsFe3dIl8qGWKXgEHL9wmQ==} + '@shikijs/langs@3.15.0': + resolution: {integrity: sha512-WpRvEFvkVvO65uKYW4Rzxs+IG0gToyM8SARQMtGGsH4GDMNZrr60qdggXrFOsdfOVssG/QQGEl3FnJ3EZ+8w8A==} - '@shikijs/themes@1.29.2': - resolution: {integrity: sha512-i9TNZlsq4uoyqSbluIcZkmPL9Bfi3djVxRnofUHwvx/h6SRW3cwgBC5SML7vsDcWyukY0eCzVN980rqP6qNl9g==} + '@shikijs/themes@3.15.0': + resolution: {integrity: sha512-8ow2zWb1IDvCKjYb0KiLNrK4offFdkfNVPXb1OZykpLCzRU6j+efkY+Y7VQjNlNFXonSw+4AOdGYtmqykDbRiQ==} - '@shikijs/twoslash@1.29.2': - resolution: {integrity: sha512-2S04ppAEa477tiaLfGEn1QJWbZUmbk8UoPbAEw4PifsrxkBXtAtOflIZJNtuCwz8ptc/TPxy7CO7gW4Uoi6o/g==} + '@shikijs/twoslash@3.15.0': + resolution: {integrity: sha512-3GoJvYMm2oj4Mq+yJyXt9vmMFfih34FBlLMYLRAIXNmBrj3/6jsuHKakGHMVza5jui6TmmjbS5bmJI29UHftQQ==} + peerDependencies: + typescript: '>=5.5.0' - '@shikijs/types@1.29.2': - resolution: {integrity: sha512-VJjK0eIijTZf0QSTODEXCqinjBn0joAHQ+aPSBzrv4O2d/QSbsMw+ZeSRx03kV34Hy7NzUvV/7NqfYGRLrASmw==} + '@shikijs/types@3.15.0': + resolution: {integrity: sha512-BnP+y/EQnhihgHy4oIAN+6FFtmfTekwOLsQbRw9hOKwqgNy8Bdsjq8B05oAt/ZgvIWWFrshV71ytOrlPfYjIJw==} '@shikijs/vscode-textmate@10.0.2': resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==} @@ -1593,15 +1615,96 @@ packages: '@swc/helpers@0.5.15': resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==} - '@tailwindcss/line-clamp@0.4.4': - resolution: {integrity: sha512-5U6SY5z8N42VtrCrKlsTAA35gy2VSyYtHWCsg1H87NU1SXnEfekTVlrga9fzUDrrHcGi2Lb5KenUWb4lRQT5/g==} - peerDependencies: - tailwindcss: '>=2.0.0 || >=3.0.0 || >=3.0.0-alpha.1' + '@swc/helpers@0.5.17': + resolution: {integrity: sha512-5IKx/Y13RsYd+sauPb2x+U/xZikHjolzfuDgTAl/Tdf3Q8rslRvC19NKDLgAJQ6wsqADk10ntlv08nPFw/gO/A==} - '@tailwindcss/typography@0.5.13': - resolution: {integrity: sha512-ADGcJ8dX21dVVHIwTRgzrcunY6YY9uSlAHHGVKvkA+vLc5qLwEszvKts40lx7z0qc4clpjclwLeK5rVCV2P/uw==} - peerDependencies: - tailwindcss: '>=3.0.0 || insiders' + '@tailwindcss/node@4.1.17': + resolution: {integrity: sha512-csIkHIgLb3JisEFQ0vxr2Y57GUNYh447C8xzwj89U/8fdW8LhProdxvnVH6U8M2Y73QKiTIH+LWbK3V2BBZsAg==} + + '@tailwindcss/oxide-android-arm64@4.1.17': + resolution: {integrity: sha512-BMqpkJHgOZ5z78qqiGE6ZIRExyaHyuxjgrJ6eBO5+hfrfGkuya0lYfw8fRHG77gdTjWkNWEEm+qeG2cDMxArLQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@tailwindcss/oxide-darwin-arm64@4.1.17': + resolution: {integrity: sha512-EquyumkQweUBNk1zGEU/wfZo2qkp/nQKRZM8bUYO0J+Lums5+wl2CcG1f9BgAjn/u9pJzdYddHWBiFXJTcxmOg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@tailwindcss/oxide-darwin-x64@4.1.17': + resolution: {integrity: sha512-gdhEPLzke2Pog8s12oADwYu0IAw04Y2tlmgVzIN0+046ytcgx8uZmCzEg4VcQh+AHKiS7xaL8kGo/QTiNEGRog==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@tailwindcss/oxide-freebsd-x64@4.1.17': + resolution: {integrity: sha512-hxGS81KskMxML9DXsaXT1H0DyA+ZBIbyG/sSAjWNe2EDl7TkPOBI42GBV3u38itzGUOmFfCzk1iAjDXds8Oh0g==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.17': + resolution: {integrity: sha512-k7jWk5E3ldAdw0cNglhjSgv501u7yrMf8oeZ0cElhxU6Y2o7f8yqelOp3fhf7evjIS6ujTI3U8pKUXV2I4iXHQ==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.17': + resolution: {integrity: sha512-HVDOm/mxK6+TbARwdW17WrgDYEGzmoYayrCgmLEw7FxTPLcp/glBisuyWkFz/jb7ZfiAXAXUACfyItn+nTgsdQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-musl@4.1.17': + resolution: {integrity: sha512-HvZLfGr42i5anKtIeQzxdkw/wPqIbpeZqe7vd3V9vI3RQxe3xU1fLjss0TjyhxWcBaipk7NYwSrwTwK1hJARMg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-gnu@4.1.17': + resolution: {integrity: sha512-M3XZuORCGB7VPOEDH+nzpJ21XPvK5PyjlkSFkFziNHGLc5d6g3di2McAAblmaSUNl8IOmzYwLx9NsE7bplNkwQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-musl@4.1.17': + resolution: {integrity: sha512-k7f+pf9eXLEey4pBlw+8dgfJHY4PZ5qOUFDyNf7SI6lHjQ9Zt7+NcscjpwdCEbYi6FI5c2KDTDWyf2iHcCSyyQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-wasm32-wasi@4.1.17': + resolution: {integrity: sha512-cEytGqSSoy7zK4JRWiTCx43FsKP/zGr0CsuMawhH67ONlH+T79VteQeJQRO/X7L0juEUA8ZyuYikcRBf0vsxhg==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + bundledDependencies: + - '@napi-rs/wasm-runtime' + - '@emnapi/core' + - '@emnapi/runtime' + - '@tybys/wasm-util' + - '@emnapi/wasi-threads' + - tslib + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.17': + resolution: {integrity: sha512-JU5AHr7gKbZlOGvMdb4722/0aYbU+tN6lv1kONx0JK2cGsh7g148zVWLM0IKR3NeKLv+L90chBVYcJ8uJWbC9A==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@tailwindcss/oxide-win32-x64-msvc@4.1.17': + resolution: {integrity: sha512-SKWM4waLuqx0IH+FMDUw6R66Hu4OuTALFgnleKbqhgGU30DY20NORZMZUKgLRjQXNN2TLzKvh48QXTig4h4bGw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@tailwindcss/oxide@4.1.17': + resolution: {integrity: sha512-F0F7d01fmkQhsTjXezGBLdrl1KresJTcI3DB8EkScCldyKp3Msz4hub4uyYaVnk88BAS1g5DQjjF6F5qczheLA==} + engines: {node: '>= 10'} + + '@tailwindcss/postcss@4.1.17': + resolution: {integrity: sha512-+nKl9N9mN5uJ+M7dBOOCzINw94MPstNR/GtIhz1fpZysxL/4a+No64jCBD6CPN+bIHWFx3KWuu8XJRrj/572Dw==} '@tanstack/react-virtual@3.13.12': resolution: {integrity: sha512-Gd13QdxPSukP8ZrkbgS2RwoZseTTbQPLnQEn7HY/rqtM+8Zt95f7xKC7N0EsKs7aoz0WzZ+fditZux+F8EzYxA==} @@ -1609,22 +1712,13 @@ packages: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - '@tanstack/react-virtual@3.8.1': - resolution: {integrity: sha512-dP5a7giEM4BQWLJ7K07ToZv8rF51mzbrBMkf0scg1QNYuFx3utnPUBPUHdzaowZhIez1K2XS78amuzD+YGRA5Q==} - peerDependencies: - react: ^16.8.0 || ^17.0.0 || ^18.0.0 - react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 - '@tanstack/virtual-core@3.13.12': resolution: {integrity: sha512-1YBOJfRHV4sXUmWsFSf5rQor4Ss82G8dQWLRbnk3GA4jeP8hQt1hxXh0tmflpC0dz3VgEv/1+qwPyLeWkQuPFA==} - '@tanstack/virtual-core@3.8.1': - resolution: {integrity: sha512-uNtAwenT276M9QYCjTBoHZ8X3MUeCRoGK59zPi92hMIxdfS9AyHjkDWJ94WroDxnv48UE+hIeo21BU84jKc8aQ==} - - '@theguild/remark-mermaid@0.1.3': - resolution: {integrity: sha512-2FjVlaaKXK7Zj7UJAgOVTyaahn/3/EAfqYhyXg0BfDBVUl+lXcoIWRaxzqfnDr2rv8ax6GsC5mNh6hAaT86PDw==} + '@theguild/remark-mermaid@0.3.0': + resolution: {integrity: sha512-Fy1J4FSj8totuHsHFpaeWyWRaRSIvpzGTRoEfnNJc1JmLV9uV70sYE3zcT+Jj5Yw20Xq4iCsiT+3Ho49BBZcBQ==} peerDependencies: - react: ^18.2.0 + react: ^18.2.0 || ^19.0.0 '@theguild/remark-npm2yarn@0.3.3': resolution: {integrity: sha512-ma6DvR03gdbvwqfKx1omqhg9May/VYGdMHvTzB4VuxkyS7KzfZ/lzrj43hmcsggpMje0x7SADA/pcMph0ejRnA==} @@ -1641,6 +1735,9 @@ packages: resolution: {integrity: sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==} engines: {node: '>=10.13.0'} + '@ts-morph/common@0.28.1': + resolution: {integrity: sha512-W74iWf7ILp1ZKNYXY5qbddNaml7e9Sedv5lvU1V8lftlitkc9Pq1A+jlH23ltDgWYeZFFEqGCD1Ies9hqu3O+g==} + '@types/d3-array@3.2.2': resolution: {integrity: sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==} @@ -1740,8 +1837,8 @@ packages: '@types/estree-jsx@1.0.5': resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} - '@types/estree@1.0.7': - resolution: {integrity: sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==} + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} '@types/flexsearch@0.7.6': resolution: {integrity: sha512-H5IXcRn96/gaDmo+rDl2aJuIJsob8dgOXDqf8K0t8rWZd1AFNaaspmRsElESiU+EWE33qfbFPgI0OC/B1g9FCA==} @@ -1829,48 +1926,23 @@ packages: resolution: {integrity: sha512-p96FSY54r+WJ50FIOsCOjyj/wavs8921hG5+kVMmZgKcvIKxMXHTrjNJvRgWa/zuX3B6t2lijLNFaOyuxUH+2A==} engines: {node: '>=14.6'} + '@zod/core@0.9.0': + resolution: {integrity: sha512-bVfPiV2kDUkAJ4ArvV4MHcPZA8y3xOX6/SjzSy2kX2ACopbaaAP4wk6hd/byRmfi9MLNai+4SFJMmcATdOyclg==} + acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 - acorn@8.14.1: - resolution: {integrity: sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==} - engines: {node: '>=0.4.0'} - hasBin: true - acorn@8.15.0: resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} engines: {node: '>=0.4.0'} hasBin: true - ansi-regex@5.0.1: - resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} - engines: {node: '>=8'} - - ansi-regex@6.0.1: - resolution: {integrity: sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==} - engines: {node: '>=12'} - ansi-styles@3.2.1: resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} engines: {node: '>=4'} - ansi-styles@4.3.0: - resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} - engines: {node: '>=8'} - - ansi-styles@6.2.1: - resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} - engines: {node: '>=12'} - - any-promise@1.3.0: - resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} - - anymatch@3.1.3: - resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} - engines: {node: '>= 8'} - apg-lite@1.0.5: resolution: {integrity: sha512-SlI+nLMQDzCZfS39ihzjGp3JNBQfJXyMi6cg9tkLOCPVErgFsUIAEdO9IezR7kbP5Xd0ozcPNQBkf9TO5cHgWw==} @@ -1896,13 +1968,6 @@ packages: autolinker@3.16.2: resolution: {integrity: sha512-JiYl7j2Z19F9NdTmirENSUUIIL/9MytEWtmzhfmsKPCp9E+G35Y0UNCMoM9tFigxT59qSc8Ml2dlZXOCVTYwuA==} - autoprefixer@10.4.19: - resolution: {integrity: sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==} - engines: {node: ^10 || ^12 || >=14} - hasBin: true - peerDependencies: - postcss: ^8.1.0 - available-typed-arrays@1.0.7: resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} engines: {node: '>= 0.4'} @@ -1942,10 +2007,6 @@ packages: big.js@5.2.2: resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} - binary-extensions@2.3.0: - resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} - engines: {node: '>=8'} - boolbase@1.0.0: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} @@ -1980,10 +2041,6 @@ packages: resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} engines: {node: '>=6'} - camelcase-css@2.0.1: - resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} - engines: {node: '>= 6'} - camelcase@6.3.0: resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} engines: {node: '>=10'} @@ -2025,12 +2082,8 @@ packages: chevrotain@11.0.3: resolution: {integrity: sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==} - chokidar@3.6.0: - resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} - engines: {node: '>= 8.10.0'} - - class-variance-authority@0.7.0: - resolution: {integrity: sha512-jFI8IQw4hczaL4ALINxqLEXQbWcNjoSkloa4IaufXCJr6QawJyw7tuRysRsrE8w2p/4gGaxKIt/hX3qz/IbD1A==} + class-variance-authority@0.7.1: + resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==} classnames@2.5.1: resolution: {integrity: sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==} @@ -2042,14 +2095,13 @@ packages: resolution: {integrity: sha512-5mOlNS0mhX0707P2I0aZ2V/cmHUEO/fL7VFLqszkhUsxt7RwnmrInf/eEQKlf5GzvYeHIjT+Ov1HRfNmymlG0w==} engines: {node: '>=18'} - clsx@2.0.0: - resolution: {integrity: sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==} - engines: {node: '>=6'} - clsx@2.1.1: resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} engines: {node: '>=6'} + code-block-writer@13.0.3: + resolution: {integrity: sha512-Oofo0pq3IKnsFtuHqSF7TqBfr71aeyZDVJ0HpmqB7FBM2qEigL0iPONSCZSO9pE9dZTAxANe5XHG9Uy0YMv8cg==} + collapse-white-space@2.1.0: resolution: {integrity: sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==} @@ -2084,10 +2136,6 @@ packages: resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==} engines: {node: '>=18'} - commander@4.1.1: - resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} - engines: {node: '>= 6'} - commander@7.2.0: resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} engines: {node: '>= 10'} @@ -2135,8 +2183,8 @@ packages: typescript: optional: true - cross-spawn@7.0.3: - resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} css-select@5.1.0: @@ -2157,11 +2205,6 @@ packages: css.escape@1.5.1: resolution: {integrity: sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==} - cssesc@3.0.0: - resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} - engines: {node: '>=4'} - hasBin: true - csso@5.0.5: resolution: {integrity: sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==} engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0, npm: '>=7.0.0'} @@ -2179,8 +2222,8 @@ packages: peerDependencies: cytoscape: ^3.2.0 - cytoscape@3.32.0: - resolution: {integrity: sha512-5JHBC9n75kz5851jeklCPmZWcg3hUe6sjqJvyk3+hVqFaKcHwHgxsjeN1yLmggoUc6STbtm9/NQyabQehfjvWQ==} + cytoscape@3.33.1: + resolution: {integrity: sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==} engines: {node: '>=0.10'} d3-array@2.12.1: @@ -2345,8 +2388,8 @@ packages: supports-color: optional: true - debug@4.4.1: - resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -2357,6 +2400,9 @@ packages: decode-named-character-reference@1.1.0: resolution: {integrity: sha512-Wy+JTSbFThEOXQIR2L6mxJvEs+veIzpmqD7ynWxMXGpnk3smkHQOp6forLdHsKpAMW9iJpaBBIxz285t1n1C3w==} + decode-named-character-reference@1.2.0: + resolution: {integrity: sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==} + deep-extend@0.6.0: resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} engines: {node: '>=4.0.0'} @@ -2391,12 +2437,6 @@ packages: devlop@1.1.0: resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} - didyoumean@1.2.2: - resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} - - dlv@1.1.3: - resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} - dom-serializer@2.0.0: resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} @@ -2410,6 +2450,9 @@ packages: dompurify@3.2.6: resolution: {integrity: sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==} + dompurify@3.3.0: + resolution: {integrity: sha512-r+f6MYR1gGN1eJv0TVQbhA7if/U7P87cdPl3HN5rikqaBSBxLiCb/b9O+2eG0cxz0ghyU+mU1QkbsOwERMYlWQ==} + domutils@3.1.0: resolution: {integrity: sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==} @@ -2424,31 +2467,23 @@ packages: resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} engines: {node: '>= 0.4'} - eastasianwidth@0.2.0: - resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} - electron-to-chromium@1.4.819: resolution: {integrity: sha512-8RwI6gKUokbHWcN3iRij/qpvf/wCbIVY5slODi85werwqUQwpFXM+dvUBND93Qh7SB0pW3Hlq3/wZsqQ3M9Jaw==} - emoji-regex-xs@1.0.0: - resolution: {integrity: sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==} - - emoji-regex@8.0.0: - resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} - - emoji-regex@9.2.2: - resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} - emojis-list@3.0.0: resolution: {integrity: sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==} engines: {node: '>= 4'} + enhanced-resolve@5.18.3: + resolution: {integrity: sha512-d4lC8xfavMeBjzGr2vECC3fsGXziXZQyJxD868h2M/mBI3PwAuODxAkLkq5HYuvrPYcUtiLzsTo8U3PgX3Ocww==} + engines: {node: '>=10.13.0'} + entities@4.5.0: resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} - entities@6.0.0: - resolution: {integrity: sha512-aKstq2TDOndCn4diEyp9Uq/Flu2i1GlLkc6XIDQSDMuaFE3OPW5OphLCyQ5SpSJZTb4reN+kTcYru5yIfXoRPw==} + entities@6.0.1: + resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} engines: {node: '>=0.12'} error-ex@1.3.2: @@ -2492,11 +2527,6 @@ packages: resolution: {integrity: sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==} engines: {node: '>=6'} - esprima@4.0.1: - resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} - engines: {node: '>=4'} - hasBin: true - estree-util-attach-comments@3.0.0: resolution: {integrity: sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==} @@ -2515,8 +2545,8 @@ packages: estree-util-to-js@2.0.0: resolution: {integrity: sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==} - estree-util-value-to-estree@3.4.0: - resolution: {integrity: sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ==} + estree-util-value-to-estree@3.5.0: + resolution: {integrity: sha512-aMV56R27Gv3QmfmF1MY12GWkGzzeAezAX+UplqHVASfjc9wNzI/X6hC0S9oxq61WT4aQesLGslWP9tKk6ghRZQ==} estree-util-visit@2.0.0: resolution: {integrity: sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==} @@ -2535,10 +2565,6 @@ packages: exsolve@1.0.8: resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==} - extend-shallow@2.0.1: - resolution: {integrity: sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==} - engines: {node: '>=0.10.0'} - extend@3.0.2: resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} @@ -2549,6 +2575,10 @@ packages: resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} engines: {node: '>=8.6.0'} + fast-glob@3.3.3: + resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} + engines: {node: '>=8.6.0'} + fast-json-patch@3.1.1: resolution: {integrity: sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==} @@ -2561,6 +2591,15 @@ packages: fault@2.0.1: resolution: {integrity: sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==} + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + fflate@0.4.8: resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==} @@ -2568,12 +2607,6 @@ packages: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} engines: {node: '>=8'} - flexsearch@0.7.43: - resolution: {integrity: sha512-c5o/+Um8aqCSOXGcZoqZOm+NqtVwNsvVpWv6lfmSclU954O3wvQKxxK8zj74fPaSJbXpSLTs4PRhh+wnoCXnKg==} - - flexsearch@0.8.158: - resolution: {integrity: sha512-UBOzX2rxIrhAeSSCesTI0qB2Q+75n66rofJx5ppZm5tjXV2P6BxOS3VHKsoSdJhIPg9IMzQl3qkVeSFyq3BUdw==} - follow-redirects@1.15.9: resolution: {integrity: sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==} engines: {node: '>=4.0'} @@ -2587,10 +2620,6 @@ packages: resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==} engines: {node: '>= 0.4'} - foreground-child@3.2.1: - resolution: {integrity: sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==} - engines: {node: '>=14'} - form-data@4.0.5: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} @@ -2599,14 +2628,6 @@ packages: resolution: {integrity: sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==} engines: {node: '>=0.4.x'} - fraction.js@4.3.7: - resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} - - fsevents@2.3.3: - resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} - engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} - os: [darwin] - function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} @@ -2633,15 +2654,6 @@ packages: resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} - glob-parent@6.0.2: - resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} - engines: {node: '>=10.13.0'} - - glob@10.4.4: - resolution: {integrity: sha512-XsOKvHsu38Xe19ZQupE6N/HENeHQBA05o3hV8labZZT2zYDg1+emxWHnc/Bm9AcCMPXfD6jt+QC7zC5JSFyumw==} - engines: {node: 14 >=14.21 || 16 >=16.20 || 18 || 20 || >=22} - hasBin: true - globals@11.12.0: resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} engines: {node: '>=4'} @@ -2661,10 +2673,6 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - gray-matter@4.0.3: - resolution: {integrity: sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==} - engines: {node: '>=6.0'} - hachure-fill@0.5.2: resolution: {integrity: sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} @@ -2792,10 +2800,6 @@ packages: is-arrayish@0.3.2: resolution: {integrity: sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==} - is-binary-path@2.1.0: - resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} - engines: {node: '>=8'} - is-callable@1.2.7: resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} engines: {node: '>= 0.4'} @@ -2812,18 +2816,10 @@ packages: engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} hasBin: true - is-extendable@0.1.1: - resolution: {integrity: sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==} - engines: {node: '>=0.10.0'} - is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} - is-fullwidth-code-point@3.0.0: - resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} - engines: {node: '>=8'} - is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} @@ -2866,15 +2862,11 @@ packages: isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} - jackspeak@3.4.2: - resolution: {integrity: sha512-qH3nOSj8q/8+Eg8LUPOq3C+6HWkpUioIjDsq1+D4zY91oZvpPttw8GwtF1nReRYKXl+1AORyFqtm2f5Q1SB6/Q==} - engines: {node: 14 >=14.21 || 16 >=16.20 || >=18} - javascript-stringify@2.1.0: resolution: {integrity: sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg==} - jiti@1.21.6: - resolution: {integrity: sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==} + jiti@2.6.1: + resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} hasBin: true js-file-download@0.4.12: @@ -2883,10 +2875,6 @@ packages: js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} - js-yaml@3.14.1: - resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} - hasBin: true - js-yaml@4.1.0: resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} hasBin: true @@ -2915,17 +2903,13 @@ packages: resolution: {integrity: sha512-8hfl5RD6P7rEeIbzStBz3h4f+BQHfq/ABtoU6gXKQv5OcZhnmrIpG7e1pYaZ8hS9e0mp+bxUj08fnDUbKctYyA==} engines: {node: '>=0.10'} - katex@0.16.22: - resolution: {integrity: sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==} + katex@0.16.25: + resolution: {integrity: sha512-woHRUZ/iF23GBP1dkDQMh1QBad9dmr8/PAwNA54VrSOVYgI12MAcE14TqnDdQOdzyEonGzMepYnqBMYdsoAr8Q==} hasBin: true khroma@2.1.0: resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} - kind-of@6.0.3: - resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} - engines: {node: '>=0.10.0'} - kolorist@1.8.0: resolution: {integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==} @@ -2939,13 +2923,75 @@ packages: layout-base@2.0.1: resolution: {integrity: sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==} - lilconfig@2.1.0: - resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==} - engines: {node: '>=10'} + lightningcss-android-arm64@1.30.2: + resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [android] - lilconfig@3.1.2: - resolution: {integrity: sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==} - engines: {node: '>=14'} + lightningcss-darwin-arm64@1.30.2: + resolution: {integrity: sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [darwin] + + lightningcss-darwin-x64@1.30.2: + resolution: {integrity: sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [darwin] + + lightningcss-freebsd-x64@1.30.2: + resolution: {integrity: sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [freebsd] + + lightningcss-linux-arm-gnueabihf@1.30.2: + resolution: {integrity: sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==} + engines: {node: '>= 12.0.0'} + cpu: [arm] + os: [linux] + + lightningcss-linux-arm64-gnu@1.30.2: + resolution: {integrity: sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-arm64-musl@1.30.2: + resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-x64-gnu@1.30.2: + resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-linux-x64-musl@1.30.2: + resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-win32-arm64-msvc@1.30.2: + resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [win32] + + lightningcss-win32-x64-msvc@1.30.2: + resolution: {integrity: sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [win32] + + lightningcss@1.30.2: + resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==} + engines: {node: '>= 12.0.0'} lines-and-columns@1.2.4: resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} @@ -2967,18 +3013,9 @@ packages: lodash-es@4.17.21: resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==} - lodash.castarray@4.4.0: - resolution: {integrity: sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q==} - lodash.debounce@4.0.8: resolution: {integrity: sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==} - lodash.isplainobject@4.0.6: - resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} - - lodash.merge@4.6.2: - resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} - lodash@4.17.21: resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} @@ -2995,13 +3032,12 @@ packages: lowlight@1.20.0: resolution: {integrity: sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==} - lru-cache@10.4.1: - resolution: {integrity: sha512-8h/JsUc/2+Dm9RPJnBAmObGnUqTMmsIKThxixMLOkrebSihRhTV0wLD/8BSk6OU6Pbj8hiDTbsI3fLjBJSlhDg==} - engines: {node: 14 >= 14.21 || 16 >= 16.20 || 18 >=18.20 || 20 || >=22} - lru-cache@5.1.1: resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + markdown-extensions@2.0.0: resolution: {integrity: sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==} engines: {node: '>=16'} @@ -3097,8 +3133,8 @@ packages: mdast-util-phrasing@4.1.0: resolution: {integrity: sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==} - mdast-util-to-hast@13.2.0: - resolution: {integrity: sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==} + mdast-util-to-hast@13.2.1: + resolution: {integrity: sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==} mdast-util-to-markdown@2.1.2: resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==} @@ -3247,6 +3283,10 @@ packages: resolution: {integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==} engines: {node: '>=8.6'} + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + mime-db@1.52.0: resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} @@ -3263,21 +3303,17 @@ packages: resolution: {integrity: sha512-bjdr2xW1dBCMsMGGsUeqM4eFI60m94+szhxWys+B1ztIt6gWSfeGBdSVCIawezeHYLYn0j6zrsXdQS/JllBzww==} engines: {node: '>=6'} + minimatch@10.1.1: + resolution: {integrity: sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==} + engines: {node: 20 || >=22} + minimatch@7.4.6: resolution: {integrity: sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw==} engines: {node: '>=10'} - minimatch@9.0.5: - resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} - engines: {node: '>=16 || 14 >=14.17'} - minimist@1.2.8: resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} - minipass@7.1.2: - resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} - engines: {node: '>=16 || 14 >=14.17'} - mj-context-menu@0.6.1: resolution: {integrity: sha512-7NO5s6n10TIV96d4g2uDpG7ZDpIhMh0QNfGdJw/W47JswFcosz457wqz/b5sAKvl12sxINGFCn80NZHKwxQEXA==} @@ -3293,19 +3329,11 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} - mz@2.7.0: - resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} - nanoid@3.3.11: resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true - nanoid@3.3.7: - resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} - engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} - hasBin: true - negotiator@1.0.0: resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==} engines: {node: '>= 0.6'} @@ -3348,19 +3376,19 @@ packages: sass: optional: true - nextra-theme-docs@3.3.1: - resolution: {integrity: sha512-P305m2UcW2IDyQhjrcAu0qpdPArikofinABslUCAyixYShsmcdDRUhIMd4QBHYru4gQuVjGWX9PhWZZCbNvzDQ==} + nextra-theme-docs@4.6.0: + resolution: {integrity: sha512-lAFveL2sFZ6NRr602MTwsQK1bjVYYbuHkQlsrHNutwIV6YvD9IruP7M8WUXEMasjH6RY6bVN/BDS/qO7NJgbgg==} peerDependencies: - next: '>=13' - nextra: 3.3.1 + next: '>=14' + nextra: 4.6.0 react: '>=18' react-dom: '>=18' - nextra@3.3.1: - resolution: {integrity: sha512-jiwj+LfUPHHeAxJAEqFuglxnbjFgzAOnDWFsjv7iv3BWiX8OksDwd3I2Sv3j2zba00iIBDEPdNeylfzTtTLZVg==} + nextra@4.6.0: + resolution: {integrity: sha512-7kIBqQm2aEdHTtglcKDf8ZZMfPErY8iVym2a7ujEWUoHbCc5zsWloYdrtSHDRTmOH/hCqSsWJDZX+2lleKQscw==} engines: {node: '>=18'} peerDependencies: - next: '>=13' + next: '>=14' react: '>=18' react-dom: '>=18' @@ -3393,14 +3421,6 @@ packages: node-releases@2.0.14: resolution: {integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==} - normalize-path@3.0.0: - resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} - engines: {node: '>=0.10.0'} - - normalize-range@0.1.2: - resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} - engines: {node: '>=0.10.0'} - npm-run-path@5.3.0: resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} @@ -3416,16 +3436,15 @@ packages: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} engines: {node: '>=0.10.0'} - object-hash@3.0.0: - resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} - engines: {node: '>= 6'} - onetime@6.0.0: resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} engines: {node: '>=12'} - oniguruma-to-es@2.3.0: - resolution: {integrity: sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g==} + oniguruma-parser@0.12.1: + resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==} + + oniguruma-to-es@4.3.4: + resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==} openapi-path-templating@2.2.1: resolution: {integrity: sha512-eN14VrDvl/YyGxxrkGOHkVkWEoPyhyeydOUrbvjoz8K5eIGgELASwN1eqFOJ2CTQMGCy2EntOK1KdtJ8ZMekcg==} @@ -3435,16 +3454,13 @@ packages: resolution: {integrity: sha512-DPlCms3KKEbjVQb0spV6Awfn6UWNheuG/+folQPzh/wUaKwuqvj8zt5gagD7qoyxtE03cIiKPgLFS3Q8Bz00uQ==} engines: {node: '>=12.20.0'} - p-limit@6.2.0: - resolution: {integrity: sha512-kuUqqHNUqoIWp/c467RI4X6mmyuojY5jGutNU0wVTmEOOfcuwLqyMVoAi9MKi2Ak+5i9+nhmrK4ufZE8069kHA==} - engines: {node: '>=18'} - - package-json-from-dist@1.0.0: - resolution: {integrity: sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==} - package-manager-detector@1.5.0: resolution: {integrity: sha512-uBj69dVlYe/+wxj8JOpr97XfsxH/eumMt6HqjNTmJDf/6NO9s+0uxeOneIz3AsPt2m6y9PqzDzd3ATcU17MNfw==} + pagefind@1.4.0: + resolution: {integrity: sha512-z2kY1mQlL4J8q5EIsQkLzQjilovKzfNVhX8De6oyE6uHpfFtyBaqUpcl/XzJC/4fjD8vBDyh1zolimIcVrCn9g==} + hasBin: true + parent-module@1.0.1: resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} engines: {node: '>=6'} @@ -3465,6 +3481,9 @@ packages: parse5@7.3.0: resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} + path-browserify@1.0.1: + resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + path-data-parser@0.1.0: resolution: {integrity: sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==} @@ -3479,10 +3498,6 @@ packages: path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} - path-scurry@1.11.1: - resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} - engines: {node: '>=16 || 14 >=14.18'} - path-type@4.0.0: resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} engines: {node: '>=8'} @@ -3504,13 +3519,9 @@ packages: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} engines: {node: '>=8.6'} - pify@2.3.0: - resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==} - engines: {node: '>=0.10.0'} - - pirates@4.0.6: - resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==} - engines: {node: '>= 6'} + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} pkg-types@1.3.1: resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} @@ -3528,58 +3539,13 @@ packages: resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==} engines: {node: '>= 0.4'} - postcss-import@15.1.0: - resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==} - engines: {node: '>=14.0.0'} - peerDependencies: - postcss: ^8.0.0 + postcss@8.4.31: + resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} + engines: {node: ^10 || ^12 || >=14} - postcss-js@4.0.1: - resolution: {integrity: sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==} - engines: {node: ^12 || ^14 || >= 16} - peerDependencies: - postcss: ^8.4.21 - - postcss-load-config@4.0.2: - resolution: {integrity: sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==} - engines: {node: '>= 14'} - peerDependencies: - postcss: '>=8.0.9' - ts-node: '>=9.0.0' - peerDependenciesMeta: - postcss: - optional: true - ts-node: - optional: true - - postcss-nested@6.0.1: - resolution: {integrity: sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==} - engines: {node: '>=12.0'} - peerDependencies: - postcss: ^8.2.14 - - postcss-selector-parser@6.0.10: - resolution: {integrity: sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==} - engines: {node: '>=4'} - - postcss-selector-parser@6.1.0: - resolution: {integrity: sha512-UMz42UD0UY0EApS0ZL9o1XnLhSTtvvvLe5Dc2H2O56fvRZi+KulDyf5ctDhhtYJBGKStV2FL1fy6253cmLgqVQ==} - engines: {node: '>=4'} - - postcss-value-parser@4.2.0: - resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} - - postcss@8.4.31: - resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} - engines: {node: ^10 || ^12 || >=14} - - postcss@8.4.39: - resolution: {integrity: sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==} - engines: {node: ^10 || ^12 || >=14} - - postcss@8.5.6: - resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} - engines: {node: ^10 || ^12 || >=14} + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} posthog-js@1.223.5: resolution: {integrity: sha512-QCapVOZ0zusWR2BryAc3utuEwlsK4xhbpaHWi56cUJwdHOi3gThmXL/bpS5KZtYAJN3UUEwN5Ef3IcfDLp9fMQ==} @@ -3643,6 +3609,11 @@ packages: randombytes@2.1.0: resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} + react-compiler-runtime@19.1.0-rc.3: + resolution: {integrity: sha512-Cssogys2XZu6SqxRdX2xd8cQAf57BBvFbLEBlIa77161lninbKUn/EqbecCe7W3eqDQfg3rIoOwzExzgCh7h/g==} + peerDependencies: + react: ^17.0.0 || ^18.0.0 || ^19.0.0 || ^0.0.0-experimental + react-copy-to-clipboard@5.1.0: resolution: {integrity: sha512-k61RsNgAayIJNoy9yDsYzDe/yAZAzEbEgcz3DZMhF686LEyukcE1hzurxe85JandPUG+yTfGVFzuEw3xt8WP/A==} peerDependencies: @@ -3712,13 +3683,6 @@ packages: resolution: {integrity: sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==} engines: {node: '>=0.10.0'} - read-cache@1.0.0: - resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} - - readdirp@3.6.0: - resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} - engines: {node: '>=8.10.0'} - reading-time@1.5.0: resolution: {integrity: sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==} @@ -3760,14 +3724,14 @@ packages: regenerator-transform@0.15.2: resolution: {integrity: sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==} - regex-recursion@5.1.1: - resolution: {integrity: sha512-ae7SBCbzVNrIjgSbh7wMznPcQel1DNlDtzensnFxpiNpXt1U2ju/bHugH422r+4LAVS1FpW1YCwilmnNsjum9w==} + regex-recursion@6.0.2: + resolution: {integrity: sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==} regex-utilities@2.3.0: resolution: {integrity: sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==} - regex@5.1.1: - resolution: {integrity: sha512-dN5I359AVGPnwzJm2jN1k0W9LPZ+ePvoOeVMMfqIMFz53sSwXkxaJoxr50ptnsC771lK95BnTrVSZxq0b9yCGw==} + regex@6.0.1: + resolution: {integrity: sha512-uorlqlzAKjKQZ5P+kTJr3eeJGSVroLKoHmquUj4zHWuR+hEyNqlXsSKlYYF5F4NI6nl7tWCs0apKJ0lmfsXAPA==} regexpu-core@5.3.2: resolution: {integrity: sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==} @@ -3783,11 +3747,11 @@ packages: rehype-parse@9.0.1: resolution: {integrity: sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag==} - rehype-pretty-code@0.14.0: - resolution: {integrity: sha512-hBeKF/Wkkf3zyUS8lal9RCUuhypDWLQc+h9UrP9Pav25FUm/AQAVh4m5gdvJxh4Oz+U+xKvdsV01p1LdvsZTiQ==} + rehype-pretty-code@0.14.1: + resolution: {integrity: sha512-IpG4OL0iYlbx78muVldsK86hdfNoht0z63AP7sekQNW2QOTmjxB7RbTO+rhIYNGRljgHxgVZoPwUl6bIC9SbjA==} engines: {node: '>=18'} peerDependencies: - shiki: ^1.3.0 + shiki: ^1.0.0 || ^2.0.0 || ^3.0.0 rehype-raw@7.0.0: resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} @@ -3893,10 +3857,6 @@ packages: scroll-into-view-if-needed@3.1.0: resolution: {integrity: sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==} - section-matter@1.0.0: - resolution: {integrity: sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==} - engines: {node: '>=4'} - semver@6.3.1: resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} hasBin: true @@ -3915,6 +3875,9 @@ packages: resolution: {integrity: sha512-3NnuWfM6vBYoy5gZFvHiYsVbafvI9vZv/+jlIigFn4oP4zjNPK3LhcY0xSCgeb1a5L8jO71Mit9LlNoi2UfDDQ==} engines: {node: '>=10'} + server-only@0.0.1: + resolution: {integrity: sha512-qepMx2JxAa5jjfzxG79yPPq+8BuFToHd1hm7kI+Z4zAq1ftQiP7HcxMhDDItrbtwVeLg/cY2JnKnrcFkmiswNA==} + set-function-length@1.2.2: resolution: {integrity: sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==} engines: {node: '>= 0.4'} @@ -3940,8 +3903,8 @@ packages: resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} engines: {node: '>=8'} - shiki@1.29.2: - resolution: {integrity: sha512-njXuliz/cP+67jU2hukkxCNuH1yUi4QfdZZY+sMr5PPrIyXSu5iTb/qYC4BiWWB0vZ+7TbdvYUCeL23zpwCfbg==} + shiki@3.15.0: + resolution: {integrity: sha512-kLdkY6iV3dYbtPwS9KXU7mjfmDm25f5m0IPNFnaXO7TBPcvbUOY72PYXSuSqDzwp+vlH/d7MXpHlKO/x+QoLXw==} short-unique-id@5.3.2: resolution: {integrity: sha512-KRT/hufMSxXKEDSQujfVE0Faa/kZ51ihUcZQAcmP04t00DvPj7Ox5anHke1sJYUtzSuiT/Y5uyzg/W7bBEGhCg==} @@ -3972,9 +3935,9 @@ packages: resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} engines: {node: '>=0.10.0'} - source-map@0.7.4: - resolution: {integrity: sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==} - engines: {node: '>= 8'} + source-map@0.7.6: + resolution: {integrity: sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==} + engines: {node: '>= 12'} space-separated-tokens@2.0.2: resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==} @@ -3986,29 +3949,9 @@ packages: sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - string-width@4.2.3: - resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} - engines: {node: '>=8'} - - string-width@5.1.2: - resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} - engines: {node: '>=12'} - stringify-entities@4.0.4: resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} - strip-ansi@6.0.1: - resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} - engines: {node: '>=8'} - - strip-ansi@7.1.0: - resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} - engines: {node: '>=12'} - - strip-bom-string@1.0.0: - resolution: {integrity: sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==} - engines: {node: '>=0.10.0'} - strip-final-newline@3.0.0: resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==} engines: {node: '>=12'} @@ -4035,11 +3978,6 @@ packages: stylis@4.3.6: resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} - sucrase@3.35.0: - resolution: {integrity: sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==} - engines: {node: '>=16 || 14 >=14.17'} - hasBin: true - supports-color@5.5.0: resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} engines: {node: '>=4'} @@ -4072,25 +4010,24 @@ packages: tabbable@6.3.0: resolution: {integrity: sha512-EIHvdY5bPLuWForiR/AN2Bxngzpuwn1is4asboytXtpTgsArc+WmSJKVLlhdh71u7jFcryDqB2A8lQvj78MkyQ==} - tailwind-merge@2.4.0: - resolution: {integrity: sha512-49AwoOQNKdqKPd9CViyH5wJoSKsCDjUlzL8DxuGp3P1FsGY36NJDAa18jLZcaHAUUuTj+JB8IAo8zWgBNvBF7A==} - - tailwindcss@3.4.4: - resolution: {integrity: sha512-ZoyXOdJjISB7/BcLTR6SEsLgKtDStYyYZVLsUtWChO4Ps20CBad7lfJKVDiejocV4ME1hLmyY0WJE3hSDcmQ2A==} - engines: {node: '>=14.0.0'} - hasBin: true + tailwind-merge@3.4.0: + resolution: {integrity: sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==} - thenify-all@1.6.0: - resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} - engines: {node: '>=0.8'} + tailwindcss@4.1.17: + resolution: {integrity: sha512-j9Ee2YjuQqYT9bbRTfTZht9W/ytp5H+jJpZKiYdP/bpnXARAuELt9ofP0lPnmHjbga7SNQIxdTAXCmtKVYjN+Q==} - thenify@3.3.1: - resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==} + tapable@2.3.0: + resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} + engines: {node: '>=6'} tinyexec@1.0.2: resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} engines: {node: '>=18'} + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + title@4.0.1: resolution: {integrity: sha512-xRnPkJx9nvE5MF6LkB5e8QJjE2FW8269wTu/LQdf7zZqBgPly0QJPf/CWAo7srj5so4yXfoLEdCFgurlpi47zg==} hasBin: true @@ -4134,12 +4071,12 @@ packages: resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} engines: {node: '>=6.10'} - ts-interface-checker@0.1.13: - resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} - ts-mixer@6.0.4: resolution: {integrity: sha512-ufKpbmrugz5Aou4wcr5Wc1UUFWOLhq+Fm6qa6P0w0K5Qw2yhaUoiWszhCVuNQyNwrlGiscHOmqYoAox1PtvgjA==} + ts-morph@27.0.2: + resolution: {integrity: sha512-fhUhgeljcrdZ+9DZND1De1029PrE+cMkIP7ooqkLRTrRLTqcki2AstsyJm0vRNbTbVCNJ0idGlbBrfqc7/nA8w==} + ts-toolbelt@9.6.0: resolution: {integrity: sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==} @@ -4149,13 +4086,13 @@ packages: tslib@2.8.1: resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} - twoslash-protocol@0.2.12: - resolution: {integrity: sha512-5qZLXVYfZ9ABdjqbvPc4RWMr7PrpPaaDSeaYY55vl/w1j6H6kzsWK/urAEIXlzYlyrFmyz1UbwIt+AA0ck+wbg==} + twoslash-protocol@0.3.4: + resolution: {integrity: sha512-HHd7lzZNLUvjPzG/IE6js502gEzLC1x7HaO1up/f72d8G8ScWAs9Yfa97igelQRDl5h9tGcdFsRp+lNVre1EeQ==} - twoslash@0.2.12: - resolution: {integrity: sha512-tEHPASMqi7kqwfJbkk7hc/4EhlrKCSLcur+TcvYki3vhIfaRMXnXjaYFgXpoZRbT6GdprD4tGuVBEmTpUgLBsw==} + twoslash@0.3.4: + resolution: {integrity: sha512-RtJURJlGRxrkJmTcZMjpr7jdYly1rfgpujJr1sBM9ch7SKVht/SjFk23IOAyvwT1NLCk+SJiMrvW4rIAUM2Wug==} peerDependencies: - typescript: '*' + typescript: ^5.5.0 type-fest@0.20.2: resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} @@ -4168,9 +4105,9 @@ packages: types-ramda@0.30.1: resolution: {integrity: sha512-1HTsf5/QVRmLzcGfldPFvkVsAdi1db1BBKzi7iW3KBUlOICg/nKnFS+jGqDJS3YD8VsWbAh7JiHeBvbsw8RPxA==} - typescript@4.9.5: - resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} - engines: {node: '>=4.2.0'} + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + engines: {node: '>=14.17'} hasBin: true uc.micro@2.1.0: @@ -4214,6 +4151,9 @@ packages: unist-util-is@6.0.0: resolution: {integrity: sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==} + unist-util-is@6.0.1: + resolution: {integrity: sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==} + unist-util-modify-children@4.0.0: resolution: {integrity: sha512-+tdN5fGNddvsQdIzUF3Xx82CU9sMM+fA0dLgR9vOmT0oPT2jH+P1nd5lSqfCfXAw+93NhcXNY2qqvTUtE4cQkw==} @@ -4241,6 +4181,9 @@ packages: unist-util-visit-parents@6.0.1: resolution: {integrity: sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==} + unist-util-visit-parents@6.0.2: + resolution: {integrity: sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==} + unist-util-visit@3.1.0: resolution: {integrity: sha512-Szoh+R/Ll68QWAyQyZZpQzZQm2UPbxibDvaY8Xc9SUtYgPsDzx5AWSk++UUt2hJuow8mvwR+rG+LQLw+KsuAKA==} @@ -4264,8 +4207,10 @@ packages: peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - util-deprecate@1.0.2: - resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + use-sync-external-store@1.6.0: + resolution: {integrity: sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 uuid@11.1.0: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} @@ -4277,6 +4222,9 @@ packages: vfile-message@4.0.2: resolution: {integrity: sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==} + vfile-message@4.0.3: + resolution: {integrity: sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==} + vfile@6.0.3: resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} @@ -4325,14 +4273,6 @@ packages: wicked-good-xpath@1.3.0: resolution: {integrity: sha512-Gd9+TUn5nXdwj/hFsPVx5cuHHiF5Bwuc30jZ4+ronF1qHK5O7HD0sgmXWSEgwKquT3ClLoKPVbO6qGwVwLzvAw==} - wrap-ansi@7.0.0: - resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} - engines: {node: '>=10'} - - wrap-ansi@8.1.0: - resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==} - engines: {node: '>=12'} - xml-but-prettier@1.0.1: resolution: {integrity: sha512-C2CJaadHrZTqESlH03WOyw0oZTtoy2uEg6dSDF6YRg+9GnYNub53RRemLpnvtbHDFelxMx4LajiFsYeR6XJHgQ==} @@ -4351,9 +4291,10 @@ packages: engines: {node: '>= 14'} hasBin: true - yocto-queue@1.2.2: - resolution: {integrity: sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==} - engines: {node: '>=12.20'} + yaml@2.8.1: + resolution: {integrity: sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==} + engines: {node: '>= 14.6'} + hasBin: true youtube-player@5.5.2: resolution: {integrity: sha512-ZGtsemSpXnDky2AUYWgxjaopgB+shFHgXVpiJFeNB5nWEugpW1KWYDaHKuLqh2b67r24GtP6HoSW5swvf0fFIQ==} @@ -4361,14 +4302,26 @@ packages: zenscroll@4.0.2: resolution: {integrity: sha512-jEA1znR7b4C/NnaycInCU6h/d15ZzCd1jmsruqOKnZP6WXQSMH3W2GL+OXbkruslU4h+Tzuos0HdswzRUk/Vgg==} - zod-validation-error@3.5.4: - resolution: {integrity: sha512-+hEiRIiPobgyuFlEojnqjJnhFvg4r/i3cqgcm67eehZf/WBaK3g6cD02YU9mtdVxZjv8CzCA9n/Rhrs3yAAvAw==} - engines: {node: '>=18.0.0'} - peerDependencies: - zod: ^3.24.4 + zod@4.0.0-beta.20250424T163858: + resolution: {integrity: sha512-fKhW+lEJnfUGo0fvQjmam39zUytARR2UdCEh7/OXJSBbKScIhD343K74nW+UUHu/r6dkzN6Uc/GqwogFjzpCXg==} - zod@3.25.49: - resolution: {integrity: sha512-JMMPMy9ZBk3XFEdbM3iL1brx4NUSejd6xr3ELrrGEfGb355gjhiAWtG3K5o+AViV/3ZfkIrCzXsZn6SbLwTR8Q==} + zustand@5.0.8: + resolution: {integrity: sha512-gyPKpIaxY9XcO2vSMrLbiER7QMAMGOQZVRdJ6Zi782jkbzZygq5GI9nG8g+sMgitRtndwaBSl7uiqC49o1SSiw==} + engines: {node: '>=12.20.0'} + peerDependencies: + '@types/react': '>=18.0.0' + immer: '>=9.0.6' + react: '>=18.0.0' + use-sync-external-store: '>=1.2.0' + peerDependenciesMeta: + '@types/react': + optional: true + immer: + optional: true + react: + optional: true + use-sync-external-store: + optional: true zwitch@2.0.4: resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} @@ -5296,7 +5249,7 @@ snapshots: '@floating-ui/utils@0.2.10': {} - '@formatjs/intl-localematcher@0.5.10': + '@formatjs/intl-localematcher@0.6.2': dependencies: tslib: 2.8.1 @@ -5320,13 +5273,6 @@ snapshots: prop-types: 15.8.1 react: 18.3.1 - '@headlessui/react@1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@tanstack/react-virtual': 3.8.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - client-only: 0.0.1 - react: 18.3.1 - react-dom: 18.3.1(react@18.3.1) - '@headlessui/react@2.2.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@floating-ui/react': 0.26.28(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -5335,7 +5281,7 @@ snapshots: '@tanstack/react-virtual': 3.13.12(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - use-sync-external-store: 1.5.0(react@18.3.1) + use-sync-external-store: 1.6.0(react@18.3.1) '@iconify/types@2.0.0': {} @@ -5344,7 +5290,7 @@ snapshots: '@antfu/install-pkg': 1.1.0 '@antfu/utils': 9.3.0 '@iconify/types': 2.0.0 - debug: 4.4.1 + debug: 4.4.3 globals: 15.15.0 kolorist: 1.8.0 local-pkg: 1.1.2 @@ -5527,14 +5473,11 @@ snapshots: '@img/sharp-win32-x64@0.34.5': optional: true - '@isaacs/cliui@8.0.2': + '@isaacs/balanced-match@4.0.1': {} + + '@isaacs/brace-expansion@5.0.0': dependencies: - string-width: 5.1.2 - string-width-cjs: string-width@4.2.3 - strip-ansi: 7.1.0 - strip-ansi-cjs: strip-ansi@6.0.1 - wrap-ansi: 8.1.0 - wrap-ansi-cjs: wrap-ansi@7.0.0 + '@isaacs/balanced-match': 4.0.1 '@jridgewell/gen-mapping@0.3.5': dependencies: @@ -5542,12 +5485,19 @@ snapshots: '@jridgewell/sourcemap-codec': 1.4.15 '@jridgewell/trace-mapping': 0.3.25 + '@jridgewell/remapping@2.3.5': + dependencies: + '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/trace-mapping': 0.3.25 + '@jridgewell/resolve-uri@3.1.2': {} '@jridgewell/set-array@1.2.1': {} '@jridgewell/sourcemap-codec@1.4.15': {} + '@jridgewell/sourcemap-codec@1.5.5': {} + '@jridgewell/trace-mapping@0.3.25': dependencies: '@jridgewell/resolve-uri': 3.1.2 @@ -5555,11 +5505,11 @@ snapshots: '@mdx-js/mdx@3.1.1': dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 '@types/estree-jsx': 1.0.5 '@types/hast': 3.0.4 '@types/mdx': 2.0.13 - acorn: 8.14.1 + acorn: 8.15.0 collapse-white-space: 2.1.0 devlop: 1.1.0 estree-util-is-identifier-name: 3.0.0 @@ -5568,13 +5518,13 @@ snapshots: hast-util-to-jsx-runtime: 2.3.6 markdown-extensions: 2.0.0 recma-build-jsx: 1.0.0 - recma-jsx: 1.0.1(acorn@8.14.1) + recma-jsx: 1.0.1(acorn@8.15.0) recma-stringify: 1.0.0 rehype-recma: 1.0.0 remark-mdx: 3.1.1 remark-parse: 11.0.0 remark-rehype: 11.1.2 - source-map: 0.7.4 + source-map: 0.7.6 unified: 11.0.5 unist-util-position-from-estree: 2.0.0 unist-util-stringify-position: 4.0.0 @@ -5583,74 +5533,72 @@ snapshots: transitivePeerDependencies: - supports-color - '@mdx-js/react@3.1.1(@types/react@19.1.6)(react@18.3.1)': - dependencies: - '@types/mdx': 2.0.13 - '@types/react': 19.1.6 - react: 18.3.1 - '@mermaid-js/parser@0.6.3': dependencies: langium: 3.3.1 - '@napi-rs/simple-git-android-arm-eabi@0.1.19': + '@napi-rs/simple-git-android-arm-eabi@0.1.22': + optional: true + + '@napi-rs/simple-git-android-arm64@0.1.22': optional: true - '@napi-rs/simple-git-android-arm64@0.1.19': + '@napi-rs/simple-git-darwin-arm64@0.1.22': optional: true - '@napi-rs/simple-git-darwin-arm64@0.1.19': + '@napi-rs/simple-git-darwin-x64@0.1.22': optional: true - '@napi-rs/simple-git-darwin-x64@0.1.19': + '@napi-rs/simple-git-freebsd-x64@0.1.22': optional: true - '@napi-rs/simple-git-freebsd-x64@0.1.19': + '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.22': optional: true - '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.19': + '@napi-rs/simple-git-linux-arm64-gnu@0.1.22': optional: true - '@napi-rs/simple-git-linux-arm64-gnu@0.1.19': + '@napi-rs/simple-git-linux-arm64-musl@0.1.22': optional: true - '@napi-rs/simple-git-linux-arm64-musl@0.1.19': + '@napi-rs/simple-git-linux-ppc64-gnu@0.1.22': optional: true - '@napi-rs/simple-git-linux-powerpc64le-gnu@0.1.19': + '@napi-rs/simple-git-linux-s390x-gnu@0.1.22': optional: true - '@napi-rs/simple-git-linux-s390x-gnu@0.1.19': + '@napi-rs/simple-git-linux-x64-gnu@0.1.22': optional: true - '@napi-rs/simple-git-linux-x64-gnu@0.1.19': + '@napi-rs/simple-git-linux-x64-musl@0.1.22': optional: true - '@napi-rs/simple-git-linux-x64-musl@0.1.19': + '@napi-rs/simple-git-win32-arm64-msvc@0.1.22': optional: true - '@napi-rs/simple-git-win32-arm64-msvc@0.1.19': + '@napi-rs/simple-git-win32-ia32-msvc@0.1.22': optional: true - '@napi-rs/simple-git-win32-x64-msvc@0.1.19': + '@napi-rs/simple-git-win32-x64-msvc@0.1.22': optional: true - '@napi-rs/simple-git@0.1.19': + '@napi-rs/simple-git@0.1.22': optionalDependencies: - '@napi-rs/simple-git-android-arm-eabi': 0.1.19 - '@napi-rs/simple-git-android-arm64': 0.1.19 - '@napi-rs/simple-git-darwin-arm64': 0.1.19 - '@napi-rs/simple-git-darwin-x64': 0.1.19 - '@napi-rs/simple-git-freebsd-x64': 0.1.19 - '@napi-rs/simple-git-linux-arm-gnueabihf': 0.1.19 - '@napi-rs/simple-git-linux-arm64-gnu': 0.1.19 - '@napi-rs/simple-git-linux-arm64-musl': 0.1.19 - '@napi-rs/simple-git-linux-powerpc64le-gnu': 0.1.19 - '@napi-rs/simple-git-linux-s390x-gnu': 0.1.19 - '@napi-rs/simple-git-linux-x64-gnu': 0.1.19 - '@napi-rs/simple-git-linux-x64-musl': 0.1.19 - '@napi-rs/simple-git-win32-arm64-msvc': 0.1.19 - '@napi-rs/simple-git-win32-x64-msvc': 0.1.19 + '@napi-rs/simple-git-android-arm-eabi': 0.1.22 + '@napi-rs/simple-git-android-arm64': 0.1.22 + '@napi-rs/simple-git-darwin-arm64': 0.1.22 + '@napi-rs/simple-git-darwin-x64': 0.1.22 + '@napi-rs/simple-git-freebsd-x64': 0.1.22 + '@napi-rs/simple-git-linux-arm-gnueabihf': 0.1.22 + '@napi-rs/simple-git-linux-arm64-gnu': 0.1.22 + '@napi-rs/simple-git-linux-arm64-musl': 0.1.22 + '@napi-rs/simple-git-linux-ppc64-gnu': 0.1.22 + '@napi-rs/simple-git-linux-s390x-gnu': 0.1.22 + '@napi-rs/simple-git-linux-x64-gnu': 0.1.22 + '@napi-rs/simple-git-linux-x64-musl': 0.1.22 + '@napi-rs/simple-git-win32-arm64-msvc': 0.1.22 + '@napi-rs/simple-git-win32-ia32-msvc': 0.1.22 + '@napi-rs/simple-git-win32-x64-msvc': 0.1.22 '@next/env@13.5.6': {} @@ -5692,18 +5640,33 @@ snapshots: '@nodelib/fs.scandir': 2.1.5 fastq: 1.17.1 - '@pkgjs/parseargs@0.11.0': + '@pagefind/darwin-arm64@1.4.0': + optional: true + + '@pagefind/darwin-x64@1.4.0': + optional: true + + '@pagefind/freebsd-x64@1.4.0': + optional: true + + '@pagefind/linux-arm64@1.4.0': + optional: true + + '@pagefind/linux-x64@1.4.0': + optional: true + + '@pagefind/windows-x64@1.4.0': optional: true - '@radix-ui/react-compose-refs@1.1.0(@types/react@19.1.6)(react@18.3.1)': + '@radix-ui/react-compose-refs@1.1.2(@types/react@19.1.6)(react@18.3.1)': dependencies: react: 18.3.1 optionalDependencies: '@types/react': 19.1.6 - '@radix-ui/react-slot@1.1.0(@types/react@19.1.6)(react@18.3.1)': + '@radix-ui/react-slot@1.2.4(@types/react@19.1.6)(react@18.3.1)': dependencies: - '@radix-ui/react-compose-refs': 1.1.0(@types/react@19.1.6)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.6)(react@18.3.1) react: 18.3.1 optionalDependencies: '@types/react': 19.1.6 @@ -5713,7 +5676,7 @@ snapshots: '@react-aria/interactions': 3.25.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@react-aria/utils': 3.31.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@react-types/shared': 3.32.1(react@18.3.1) - '@swc/helpers': 0.5.15 + '@swc/helpers': 0.5.17 clsx: 2.1.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -5724,13 +5687,13 @@ snapshots: '@react-aria/utils': 3.31.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@react-stately/flags': 3.1.2 '@react-types/shared': 3.32.1(react@18.3.1) - '@swc/helpers': 0.5.15 + '@swc/helpers': 0.5.17 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) '@react-aria/ssr@3.9.10(react@18.3.1)': dependencies: - '@swc/helpers': 0.5.15 + '@swc/helpers': 0.5.17 react: 18.3.1 '@react-aria/utils@3.31.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': @@ -5739,18 +5702,18 @@ snapshots: '@react-stately/flags': 3.1.2 '@react-stately/utils': 3.10.8(react@18.3.1) '@react-types/shared': 3.32.1(react@18.3.1) - '@swc/helpers': 0.5.15 + '@swc/helpers': 0.5.17 clsx: 2.1.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) '@react-stately/flags@3.1.2': dependencies: - '@swc/helpers': 0.5.15 + '@swc/helpers': 0.5.17 '@react-stately/utils@3.10.8(react@18.3.1)': dependencies: - '@swc/helpers': 0.5.15 + '@swc/helpers': 0.5.17 react: 18.3.1 '@react-types/shared@3.32.1(react@18.3.1)': @@ -5767,44 +5730,42 @@ snapshots: dependencies: jstz: 2.1.1 - '@shikijs/core@1.29.2': + '@shikijs/core@3.15.0': dependencies: - '@shikijs/engine-javascript': 1.29.2 - '@shikijs/engine-oniguruma': 1.29.2 - '@shikijs/types': 1.29.2 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 hast-util-to-html: 9.0.5 - '@shikijs/engine-javascript@1.29.2': + '@shikijs/engine-javascript@3.15.0': dependencies: - '@shikijs/types': 1.29.2 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 - oniguruma-to-es: 2.3.0 + oniguruma-to-es: 4.3.4 - '@shikijs/engine-oniguruma@1.29.2': + '@shikijs/engine-oniguruma@3.15.0': dependencies: - '@shikijs/types': 1.29.2 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 - '@shikijs/langs@1.29.2': + '@shikijs/langs@3.15.0': dependencies: - '@shikijs/types': 1.29.2 + '@shikijs/types': 3.15.0 - '@shikijs/themes@1.29.2': + '@shikijs/themes@3.15.0': dependencies: - '@shikijs/types': 1.29.2 + '@shikijs/types': 3.15.0 - '@shikijs/twoslash@1.29.2(typescript@4.9.5)': + '@shikijs/twoslash@3.15.0(typescript@5.9.3)': dependencies: - '@shikijs/core': 1.29.2 - '@shikijs/types': 1.29.2 - twoslash: 0.2.12(typescript@4.9.5) + '@shikijs/core': 3.15.0 + '@shikijs/types': 3.15.0 + twoslash: 0.3.4(typescript@5.9.3) + typescript: 5.9.3 transitivePeerDependencies: - supports-color - - typescript - '@shikijs/types@1.29.2': + '@shikijs/types@3.15.0': dependencies: '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 @@ -5857,12 +5818,12 @@ snapshots: '@svgr/babel-plugin-transform-react-native-svg': 8.1.0(@babel/core@7.24.7) '@svgr/babel-plugin-transform-svg-component': 8.0.0(@babel/core@7.24.7) - '@svgr/core@8.1.0(typescript@4.9.5)': + '@svgr/core@8.1.0(typescript@5.9.3)': dependencies: '@babel/core': 7.24.7 '@svgr/babel-preset': 8.1.0(@babel/core@7.24.7) camelcase: 6.3.0 - cosmiconfig: 8.3.6(typescript@4.9.5) + cosmiconfig: 8.3.6(typescript@5.9.3) snake-case: 3.0.4 transitivePeerDependencies: - supports-color @@ -5873,35 +5834,35 @@ snapshots: '@babel/types': 7.24.7 entities: 4.5.0 - '@svgr/plugin-jsx@8.1.0(@svgr/core@8.1.0(typescript@4.9.5))': + '@svgr/plugin-jsx@8.1.0(@svgr/core@8.1.0(typescript@5.9.3))': dependencies: '@babel/core': 7.24.7 '@svgr/babel-preset': 8.1.0(@babel/core@7.24.7) - '@svgr/core': 8.1.0(typescript@4.9.5) + '@svgr/core': 8.1.0(typescript@5.9.3) '@svgr/hast-util-to-babel-ast': 8.0.0 svg-parser: 2.0.4 transitivePeerDependencies: - supports-color - '@svgr/plugin-svgo@8.1.0(@svgr/core@8.1.0(typescript@4.9.5))(typescript@4.9.5)': + '@svgr/plugin-svgo@8.1.0(@svgr/core@8.1.0(typescript@5.9.3))(typescript@5.9.3)': dependencies: - '@svgr/core': 8.1.0(typescript@4.9.5) - cosmiconfig: 8.3.6(typescript@4.9.5) + '@svgr/core': 8.1.0(typescript@5.9.3) + cosmiconfig: 8.3.6(typescript@5.9.3) deepmerge: 4.3.1 svgo: 3.3.2 transitivePeerDependencies: - typescript - '@svgr/webpack@8.1.0(typescript@4.9.5)': + '@svgr/webpack@8.1.0(typescript@5.9.3)': dependencies: '@babel/core': 7.24.7 '@babel/plugin-transform-react-constant-elements': 7.24.7(@babel/core@7.24.7) '@babel/preset-env': 7.24.7(@babel/core@7.24.7) '@babel/preset-react': 7.24.7(@babel/core@7.24.7) '@babel/preset-typescript': 7.24.7(@babel/core@7.24.7) - '@svgr/core': 8.1.0(typescript@4.9.5) - '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0(typescript@4.9.5)) - '@svgr/plugin-svgo': 8.1.0(@svgr/core@8.1.0(typescript@4.9.5))(typescript@4.9.5) + '@svgr/core': 8.1.0(typescript@5.9.3) + '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0(typescript@5.9.3)) + '@svgr/plugin-svgo': 8.1.0(@svgr/core@8.1.0(typescript@5.9.3))(typescript@5.9.3) transitivePeerDependencies: - supports-color - typescript @@ -6269,35 +6230,88 @@ snapshots: dependencies: tslib: 2.8.1 - '@tailwindcss/line-clamp@0.4.4(tailwindcss@3.4.4)': + '@swc/helpers@0.5.17': dependencies: - tailwindcss: 3.4.4 + tslib: 2.8.1 - '@tailwindcss/typography@0.5.13(tailwindcss@3.4.4)': + '@tailwindcss/node@4.1.17': dependencies: - lodash.castarray: 4.4.0 - lodash.isplainobject: 4.0.6 - lodash.merge: 4.6.2 - postcss-selector-parser: 6.0.10 - tailwindcss: 3.4.4 + '@jridgewell/remapping': 2.3.5 + enhanced-resolve: 5.18.3 + jiti: 2.6.1 + lightningcss: 1.30.2 + magic-string: 0.30.21 + source-map-js: 1.2.1 + tailwindcss: 4.1.17 - '@tanstack/react-virtual@3.13.12(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@tailwindcss/oxide-android-arm64@4.1.17': + optional: true + + '@tailwindcss/oxide-darwin-arm64@4.1.17': + optional: true + + '@tailwindcss/oxide-darwin-x64@4.1.17': + optional: true + + '@tailwindcss/oxide-freebsd-x64@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-arm64-musl@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-x64-gnu@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-x64-musl@4.1.17': + optional: true + + '@tailwindcss/oxide-wasm32-wasi@4.1.17': + optional: true + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.17': + optional: true + + '@tailwindcss/oxide-win32-x64-msvc@4.1.17': + optional: true + + '@tailwindcss/oxide@4.1.17': + optionalDependencies: + '@tailwindcss/oxide-android-arm64': 4.1.17 + '@tailwindcss/oxide-darwin-arm64': 4.1.17 + '@tailwindcss/oxide-darwin-x64': 4.1.17 + '@tailwindcss/oxide-freebsd-x64': 4.1.17 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.1.17 + '@tailwindcss/oxide-linux-arm64-gnu': 4.1.17 + '@tailwindcss/oxide-linux-arm64-musl': 4.1.17 + '@tailwindcss/oxide-linux-x64-gnu': 4.1.17 + '@tailwindcss/oxide-linux-x64-musl': 4.1.17 + '@tailwindcss/oxide-wasm32-wasi': 4.1.17 + '@tailwindcss/oxide-win32-arm64-msvc': 4.1.17 + '@tailwindcss/oxide-win32-x64-msvc': 4.1.17 + + '@tailwindcss/postcss@4.1.17': dependencies: - '@tanstack/virtual-core': 3.13.12 - react: 18.3.1 - react-dom: 18.3.1(react@18.3.1) + '@alloc/quick-lru': 5.2.0 + '@tailwindcss/node': 4.1.17 + '@tailwindcss/oxide': 4.1.17 + postcss: 8.5.6 + tailwindcss: 4.1.17 - '@tanstack/react-virtual@3.8.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@tanstack/react-virtual@3.13.12(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@tanstack/virtual-core': 3.8.1 + '@tanstack/virtual-core': 3.13.12 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) '@tanstack/virtual-core@3.13.12': {} - '@tanstack/virtual-core@3.8.1': {} - - '@theguild/remark-mermaid@0.1.3(react@18.3.1)': + '@theguild/remark-mermaid@0.3.0(react@18.3.1)': dependencies: mermaid: 11.12.1 react: 18.3.1 @@ -6320,6 +6334,12 @@ snapshots: '@trysound/sax@0.2.0': {} + '@ts-morph/common@0.28.1': + dependencies: + minimatch: 10.1.1 + path-browserify: 1.0.1 + tinyglobby: 0.2.15 + '@types/d3-array@3.2.2': {} '@types/d3-axis@3.0.6': @@ -6443,9 +6463,9 @@ snapshots: '@types/estree-jsx@1.0.5': dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 - '@types/estree@1.0.7': {} + '@types/estree@1.0.8': {} '@types/flexsearch@0.7.6': {} @@ -6494,10 +6514,10 @@ snapshots: '@types/use-sync-external-store@0.0.6': {} - '@typescript/vfs@1.6.2(typescript@4.9.5)': + '@typescript/vfs@1.6.2(typescript@5.9.3)': dependencies: - debug: 4.4.1 - typescript: 4.9.5 + debug: 4.4.3 + typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -6510,35 +6530,18 @@ snapshots: '@xmldom/xmldom@0.9.8': {} - acorn-jsx@5.3.2(acorn@8.14.1): - dependencies: - acorn: 8.14.1 + '@zod/core@0.9.0': {} - acorn@8.14.1: {} + acorn-jsx@5.3.2(acorn@8.15.0): + dependencies: + acorn: 8.15.0 acorn@8.15.0: {} - ansi-regex@5.0.1: {} - - ansi-regex@6.0.1: {} - ansi-styles@3.2.1: dependencies: color-convert: 1.9.3 - ansi-styles@4.3.0: - dependencies: - color-convert: 2.0.1 - - ansi-styles@6.2.1: {} - - any-promise@1.3.0: {} - - anymatch@3.1.3: - dependencies: - normalize-path: 3.0.0 - picomatch: 2.3.1 - apg-lite@1.0.5: {} arg@5.0.2: {} @@ -6559,16 +6562,6 @@ snapshots: dependencies: tslib: 2.8.1 - autoprefixer@10.4.19(postcss@8.4.39): - dependencies: - browserslist: 4.23.1 - caniuse-lite: 1.0.30001640 - fraction.js: 4.3.7 - normalize-range: 0.1.2 - picocolors: 1.0.1 - postcss: 8.4.39 - postcss-value-parser: 4.2.0 - available-typed-arrays@1.0.7: dependencies: possible-typed-array-names: 1.1.0 @@ -6618,8 +6611,6 @@ snapshots: big.js@5.2.2: {} - binary-extensions@2.3.0: {} - boolbase@1.0.0: {} brace-expansion@2.0.1: @@ -6661,8 +6652,6 @@ snapshots: callsites@3.1.0: {} - camelcase-css@2.0.1: {} - camelcase@6.3.0: {} caniuse-lite@1.0.30001640: {} @@ -6701,21 +6690,9 @@ snapshots: '@chevrotain/utils': 11.0.3 lodash-es: 4.17.21 - chokidar@3.6.0: - dependencies: - anymatch: 3.1.3 - braces: 3.0.3 - glob-parent: 5.1.2 - is-binary-path: 2.1.0 - is-glob: 4.0.3 - normalize-path: 3.0.0 - readdirp: 3.6.0 - optionalDependencies: - fsevents: 2.3.3 - - class-variance-authority@0.7.0: + class-variance-authority@0.7.1: dependencies: - clsx: 2.0.0 + clsx: 2.1.1 classnames@2.5.1: {} @@ -6727,10 +6704,10 @@ snapshots: is-wsl: 3.1.0 is64bit: 2.0.0 - clsx@2.0.0: {} - clsx@2.1.1: {} + code-block-writer@13.0.3: {} + collapse-white-space@2.1.0: {} color-convert@1.9.3: @@ -6763,8 +6740,6 @@ snapshots: commander@13.1.0: {} - commander@4.1.1: {} - commander@7.2.0: {} commander@8.3.0: {} @@ -6797,16 +6772,16 @@ snapshots: dependencies: layout-base: 2.0.1 - cosmiconfig@8.3.6(typescript@4.9.5): + cosmiconfig@8.3.6(typescript@5.9.3): dependencies: import-fresh: 3.3.0 js-yaml: 4.1.0 parse-json: 5.2.0 path-type: 4.0.0 optionalDependencies: - typescript: 4.9.5 + typescript: 5.9.3 - cross-spawn@7.0.3: + cross-spawn@7.0.6: dependencies: path-key: 3.1.1 shebang-command: 2.0.0 @@ -6823,7 +6798,7 @@ snapshots: css-tree@2.2.1: dependencies: mdn-data: 2.0.28 - source-map-js: 1.2.0 + source-map-js: 1.2.1 css-tree@2.3.1: dependencies: @@ -6834,25 +6809,23 @@ snapshots: css.escape@1.5.1: {} - cssesc@3.0.0: {} - csso@5.0.5: dependencies: css-tree: 2.2.1 csstype@3.1.3: {} - cytoscape-cose-bilkent@4.1.0(cytoscape@3.32.0): + cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): dependencies: cose-base: 1.0.3 - cytoscape: 3.32.0 + cytoscape: 3.33.1 - cytoscape-fcose@2.2.0(cytoscape@3.32.0): + cytoscape-fcose@2.2.0(cytoscape@3.33.1): dependencies: cose-base: 2.2.0 - cytoscape: 3.32.0 + cytoscape: 3.33.1 - cytoscape@3.32.0: {} + cytoscape@3.33.1: {} d3-array@2.12.1: dependencies: @@ -7036,7 +7009,7 @@ snapshots: dependencies: ms: 2.1.2 - debug@4.4.1: + debug@4.4.3: dependencies: ms: 2.1.3 @@ -7044,6 +7017,10 @@ snapshots: dependencies: character-entities: 2.0.2 + decode-named-character-reference@1.2.0: + dependencies: + character-entities: 2.0.2 + deep-extend@0.6.0: {} deepmerge@4.3.1: {} @@ -7064,17 +7041,12 @@ snapshots: detect-libc@2.0.3: {} - detect-libc@2.1.2: - optional: true + detect-libc@2.1.2: {} devlop@1.1.0: dependencies: dequal: 2.0.3 - didyoumean@1.2.2: {} - - dlv@1.1.3: {} - dom-serializer@2.0.0: dependencies: domelementtype: 2.3.0 @@ -7091,6 +7063,10 @@ snapshots: optionalDependencies: '@types/trusted-types': 2.0.7 + dompurify@3.3.0: + optionalDependencies: + '@types/trusted-types': 2.0.7 + domutils@3.1.0: dependencies: dom-serializer: 2.0.0 @@ -7110,21 +7086,18 @@ snapshots: es-errors: 1.3.0 gopd: 1.2.0 - eastasianwidth@0.2.0: {} - electron-to-chromium@1.4.819: {} - emoji-regex-xs@1.0.0: {} - - emoji-regex@8.0.0: {} - - emoji-regex@9.2.2: {} - emojis-list@3.0.0: {} + enhanced-resolve@5.18.3: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.3.0 + entities@4.5.0: {} - entities@6.0.0: {} + entities@6.0.1: {} error-ex@1.3.2: dependencies: @@ -7155,9 +7128,9 @@ snapshots: esast-util-from-js@2.0.1: dependencies: '@types/estree-jsx': 1.0.5 - acorn: 8.14.1 + acorn: 8.15.0 esast-util-from-estree: 2.0.0 - vfile-message: 4.0.2 + vfile-message: 4.0.3 escalade@3.1.2: {} @@ -7167,11 +7140,9 @@ snapshots: esm@3.2.25: {} - esprima@4.0.1: {} - estree-util-attach-comments@3.0.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 estree-util-build-jsx@3.0.1: dependencies: @@ -7186,18 +7157,18 @@ snapshots: estree-util-scope@1.0.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 devlop: 1.1.0 estree-util-to-js@2.0.0: dependencies: '@types/estree-jsx': 1.0.5 astring: 1.9.0 - source-map: 0.7.4 + source-map: 0.7.6 - estree-util-value-to-estree@3.4.0: + estree-util-value-to-estree@3.5.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 estree-util-visit@2.0.0: dependencies: @@ -7206,13 +7177,13 @@ snapshots: estree-walker@3.0.3: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 esutils@2.0.3: {} execa@8.0.1: dependencies: - cross-spawn: 7.0.3 + cross-spawn: 7.0.6 get-stream: 8.0.1 human-signals: 5.0.0 is-stream: 3.0.0 @@ -7224,10 +7195,6 @@ snapshots: exsolve@1.0.8: {} - extend-shallow@2.0.1: - dependencies: - is-extendable: 0.1.1 - extend@3.0.2: {} fast-deep-equal@3.1.3: {} @@ -7240,6 +7207,14 @@ snapshots: merge2: 1.4.1 micromatch: 4.0.7 + fast-glob@3.3.3: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.8 + fast-json-patch@3.1.1: {} fastq@1.17.1: @@ -7254,27 +7229,22 @@ snapshots: dependencies: format: 0.2.2 + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + fflate@0.4.8: {} fill-range@7.1.1: dependencies: to-regex-range: 5.0.1 - flexsearch@0.7.43: {} - - flexsearch@0.8.158: {} - follow-redirects@1.15.9: {} for-each@0.3.5: dependencies: is-callable: 1.2.7 - foreground-child@3.2.1: - dependencies: - cross-spawn: 7.0.3 - signal-exit: 4.1.0 - form-data@4.0.5: dependencies: asynckit: 0.4.0 @@ -7285,11 +7255,6 @@ snapshots: format@0.2.2: {} - fraction.js@4.3.7: {} - - fsevents@2.3.3: - optional: true - function-bind@1.1.2: {} gensync@1.0.0-beta.2: {} @@ -7320,19 +7285,6 @@ snapshots: dependencies: is-glob: 4.0.3 - glob-parent@6.0.2: - dependencies: - is-glob: 4.0.3 - - glob@10.4.4: - dependencies: - foreground-child: 3.2.1 - jackspeak: 3.4.2 - minimatch: 9.0.5 - minipass: 7.1.2 - package-json-from-dist: 1.0.0 - path-scurry: 1.11.1 - globals@11.12.0: {} globals@15.15.0: {} @@ -7350,13 +7302,6 @@ snapshots: graceful-fs@4.2.11: {} - gray-matter@4.0.3: - dependencies: - js-yaml: 3.14.1 - kind-of: 6.0.3 - section-matter: 1.0.0 - strip-bom-string: 1.0.0 - hachure-fill@0.5.2: {} has-flag@3.0.0: {} @@ -7395,7 +7340,7 @@ snapshots: hast-util-from-parse5: 8.0.3 parse5: 7.3.0 vfile: 6.0.3 - vfile-message: 4.0.2 + vfile-message: 4.0.3 hast-util-from-parse5@8.0.3: dependencies: @@ -7424,7 +7369,7 @@ snapshots: hast-util-from-parse5: 8.0.3 hast-util-to-parse5: 8.0.0 html-void-elements: 3.0.0 - mdast-util-to-hast: 13.2.0 + mdast-util-to-hast: 13.2.1 parse5: 7.3.0 unist-util-position: 5.0.0 unist-util-visit: 5.0.0 @@ -7434,7 +7379,7 @@ snapshots: hast-util-to-estree@3.1.3: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 '@types/estree-jsx': 1.0.5 '@types/hast': 3.0.4 comma-separated-tokens: 2.0.3 @@ -7461,7 +7406,7 @@ snapshots: comma-separated-tokens: 2.0.3 hast-util-whitespace: 3.0.0 html-void-elements: 3.0.0 - mdast-util-to-hast: 13.2.0 + mdast-util-to-hast: 13.2.1 property-information: 7.1.0 space-separated-tokens: 2.0.2 stringify-entities: 4.0.4 @@ -7469,7 +7414,7 @@ snapshots: hast-util-to-jsx-runtime@2.3.6: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 '@types/hast': 3.0.4 '@types/unist': 3.0.3 comma-separated-tokens: 2.0.3 @@ -7483,7 +7428,7 @@ snapshots: space-separated-tokens: 2.0.2 style-to-js: 1.1.21 unist-util-position: 5.0.0 - vfile-message: 4.0.2 + vfile-message: 4.0.3 transitivePeerDependencies: - supports-color @@ -7566,10 +7511,6 @@ snapshots: is-arrayish@0.3.2: {} - is-binary-path@2.1.0: - dependencies: - binary-extensions: 2.3.0 - is-callable@1.2.7: {} is-core-module@2.14.0: @@ -7580,12 +7521,8 @@ snapshots: is-docker@3.0.0: {} - is-extendable@0.1.1: {} - is-extglob@2.1.1: {} - is-fullwidth-code-point@3.0.0: {} - is-glob@4.0.3: dependencies: is-extglob: 2.1.1 @@ -7618,25 +7555,14 @@ snapshots: isexe@2.0.0: {} - jackspeak@3.4.2: - dependencies: - '@isaacs/cliui': 8.0.2 - optionalDependencies: - '@pkgjs/parseargs': 0.11.0 - javascript-stringify@2.1.0: {} - jiti@1.21.6: {} + jiti@2.6.1: {} js-file-download@0.4.12: {} js-tokens@4.0.0: {} - js-yaml@3.14.1: - dependencies: - argparse: 1.0.10 - esprima: 4.0.1 - js-yaml@4.1.0: dependencies: argparse: 2.0.1 @@ -7653,14 +7579,12 @@ snapshots: jstz@2.1.1: {} - katex@0.16.22: + katex@0.16.25: dependencies: commander: 8.3.0 khroma@2.1.0: {} - kind-of@6.0.3: {} - kolorist@1.8.0: {} langium@3.3.1: @@ -7675,9 +7599,54 @@ snapshots: layout-base@2.0.1: {} - lilconfig@2.1.0: {} + lightningcss-android-arm64@1.30.2: + optional: true + + lightningcss-darwin-arm64@1.30.2: + optional: true + + lightningcss-darwin-x64@1.30.2: + optional: true + + lightningcss-freebsd-x64@1.30.2: + optional: true + + lightningcss-linux-arm-gnueabihf@1.30.2: + optional: true + + lightningcss-linux-arm64-gnu@1.30.2: + optional: true + + lightningcss-linux-arm64-musl@1.30.2: + optional: true + + lightningcss-linux-x64-gnu@1.30.2: + optional: true + + lightningcss-linux-x64-musl@1.30.2: + optional: true - lilconfig@3.1.2: {} + lightningcss-win32-arm64-msvc@1.30.2: + optional: true + + lightningcss-win32-x64-msvc@1.30.2: + optional: true + + lightningcss@1.30.2: + dependencies: + detect-libc: 2.1.2 + optionalDependencies: + lightningcss-android-arm64: 1.30.2 + lightningcss-darwin-arm64: 1.30.2 + lightningcss-darwin-x64: 1.30.2 + lightningcss-freebsd-x64: 1.30.2 + lightningcss-linux-arm-gnueabihf: 1.30.2 + lightningcss-linux-arm64-gnu: 1.30.2 + lightningcss-linux-arm64-musl: 1.30.2 + lightningcss-linux-x64-gnu: 1.30.2 + lightningcss-linux-x64-musl: 1.30.2 + lightningcss-win32-arm64-msvc: 1.30.2 + lightningcss-win32-x64-msvc: 1.30.2 lines-and-columns@1.2.4: {} @@ -7701,14 +7670,8 @@ snapshots: lodash-es@4.17.21: {} - lodash.castarray@4.4.0: {} - lodash.debounce@4.0.8: {} - lodash.isplainobject@4.0.6: {} - - lodash.merge@4.6.2: {} - lodash@4.17.21: {} longest-streak@3.1.0: {} @@ -7726,12 +7689,14 @@ snapshots: fault: 1.0.4 highlight.js: 10.7.3 - lru-cache@10.4.1: {} - lru-cache@5.1.1: dependencies: yallist: 3.1.1 + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + markdown-extensions@2.0.0: {} markdown-it@14.1.0: @@ -7786,14 +7751,14 @@ snapshots: dependencies: '@types/mdast': 4.0.4 escape-string-regexp: 5.0.0 - unist-util-is: 6.0.0 - unist-util-visit-parents: 6.0.1 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 mdast-util-from-markdown@2.0.2: dependencies: '@types/mdast': 4.0.4 '@types/unist': 3.0.3 - decode-named-character-reference: 1.1.0 + decode-named-character-reference: 1.2.0 devlop: 1.1.0 mdast-util-to-string: 4.0.0 micromark: 4.0.2 @@ -7910,7 +7875,7 @@ snapshots: parse-entities: 4.0.2 stringify-entities: 4.0.4 unist-util-stringify-position: 4.0.0 - vfile-message: 4.0.2 + vfile-message: 4.0.3 transitivePeerDependencies: - supports-color @@ -7938,9 +7903,9 @@ snapshots: mdast-util-phrasing@4.1.0: dependencies: '@types/mdast': 4.0.4 - unist-util-is: 6.0.0 + unist-util-is: 6.0.1 - mdast-util-to-hast@13.2.0: + mdast-util-to-hast@13.2.1: dependencies: '@types/hast': 3.0.4 '@types/mdast': 4.0.4 @@ -7984,15 +7949,15 @@ snapshots: '@iconify/utils': 3.0.2 '@mermaid-js/parser': 0.6.3 '@types/d3': 7.4.3 - cytoscape: 3.32.0 - cytoscape-cose-bilkent: 4.1.0(cytoscape@3.32.0) - cytoscape-fcose: 2.2.0(cytoscape@3.32.0) + cytoscape: 3.33.1 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.33.1) + cytoscape-fcose: 2.2.0(cytoscape@3.33.1) d3: 7.9.0 d3-sankey: 0.12.3 dagre-d3-es: 7.0.13 dayjs: 1.11.19 - dompurify: 3.2.6 - katex: 0.16.22 + dompurify: 3.3.0 + katex: 0.16.25 khroma: 2.1.0 lodash-es: 4.17.21 marked: 16.4.2 @@ -8007,7 +7972,7 @@ snapshots: micromark-core-commonmark@2.0.3: dependencies: - decode-named-character-reference: 1.1.0 + decode-named-character-reference: 1.2.0 devlop: 1.1.0 micromark-factory-destination: 2.0.1 micromark-factory-label: 2.0.1 @@ -8093,7 +8058,7 @@ snapshots: dependencies: '@types/katex': 0.16.7 devlop: 1.1.0 - katex: 0.16.22 + katex: 0.16.25 micromark-factory-space: 2.0.1 micromark-util-character: 2.1.1 micromark-util-symbol: 2.0.1 @@ -8101,7 +8066,7 @@ snapshots: micromark-extension-mdx-expression@3.0.1: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 devlop: 1.1.0 micromark-factory-mdx-expression: 2.0.3 micromark-factory-space: 2.0.1 @@ -8112,7 +8077,7 @@ snapshots: micromark-extension-mdx-jsx@3.0.2: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 devlop: 1.1.0 estree-util-is-identifier-name: 3.0.0 micromark-factory-mdx-expression: 2.0.3 @@ -8121,7 +8086,7 @@ snapshots: micromark-util-events-to-acorn: 2.0.3 micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 - vfile-message: 4.0.2 + vfile-message: 4.0.3 micromark-extension-mdx-md@2.0.0: dependencies: @@ -8129,7 +8094,7 @@ snapshots: micromark-extension-mdxjs-esm@3.0.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 devlop: 1.1.0 micromark-core-commonmark: 2.0.3 micromark-util-character: 2.1.1 @@ -8137,12 +8102,12 @@ snapshots: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 unist-util-position-from-estree: 2.0.0 - vfile-message: 4.0.2 + vfile-message: 4.0.3 micromark-extension-mdxjs@3.0.0: dependencies: - acorn: 8.14.1 - acorn-jsx: 5.3.2(acorn@8.14.1) + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) micromark-extension-mdx-expression: 3.0.1 micromark-extension-mdx-jsx: 3.0.2 micromark-extension-mdx-md: 2.0.0 @@ -8165,7 +8130,7 @@ snapshots: micromark-factory-mdx-expression@2.0.3: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 devlop: 1.1.0 micromark-factory-space: 2.0.1 micromark-util-character: 2.1.1 @@ -8173,7 +8138,7 @@ snapshots: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 unist-util-position-from-estree: 2.0.0 - vfile-message: 4.0.2 + vfile-message: 4.0.3 micromark-factory-space@2.0.1: dependencies: @@ -8220,7 +8185,7 @@ snapshots: micromark-util-decode-string@2.0.1: dependencies: - decode-named-character-reference: 1.1.0 + decode-named-character-reference: 1.2.0 micromark-util-character: 2.1.1 micromark-util-decode-numeric-character-reference: 2.0.2 micromark-util-symbol: 2.0.1 @@ -8229,13 +8194,13 @@ snapshots: micromark-util-events-to-acorn@2.0.3: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 '@types/unist': 3.0.3 devlop: 1.1.0 estree-util-visit: 2.0.0 micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 - vfile-message: 4.0.2 + vfile-message: 4.0.3 micromark-util-html-tag-name@2.0.1: {} @@ -8267,8 +8232,8 @@ snapshots: micromark@4.0.2: dependencies: '@types/debug': 4.1.12 - debug: 4.4.1 - decode-named-character-reference: 1.1.0 + debug: 4.4.3 + decode-named-character-reference: 1.2.0 devlop: 1.1.0 micromark-core-commonmark: 2.0.3 micromark-factory-space: 2.0.1 @@ -8296,6 +8261,11 @@ snapshots: braces: 3.0.3 picomatch: 2.3.1 + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + mime-db@1.52.0: {} mime-types@2.1.35: @@ -8308,18 +8278,16 @@ snapshots: dependencies: lodash: 4.17.21 - minimatch@7.4.6: + minimatch@10.1.1: dependencies: - brace-expansion: 2.0.1 + '@isaacs/brace-expansion': 5.0.0 - minimatch@9.0.5: + minimatch@7.4.6: dependencies: brace-expansion: 2.0.1 minimist@1.2.8: {} - minipass@7.1.2: {} - mj-context-menu@0.6.1: {} mlly@1.8.0: @@ -8335,16 +8303,8 @@ snapshots: ms@2.1.3: {} - mz@2.7.0: - dependencies: - any-promise: 1.3.0 - object-assign: 4.1.1 - thenify-all: 1.6.0 - nanoid@3.3.11: {} - nanoid@3.3.7: {} - negotiator@1.0.0: {} neotraverse@0.6.18: {} @@ -8385,66 +8345,69 @@ snapshots: - '@babel/core' - babel-plugin-macros - nextra-theme-docs@3.3.1(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(nextra@3.3.1(@types/react@19.1.6)(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@4.9.5))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + nextra-theme-docs@4.6.0(@types/react@19.1.6)(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(nextra@4.6.0(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.9.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(use-sync-external-store@1.6.0(react@18.3.1)): dependencies: '@headlessui/react': 2.2.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1) clsx: 2.1.1 - escape-string-regexp: 5.0.0 - flexsearch: 0.7.43 next: 15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-themes: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - nextra: 3.3.1(@types/react@19.1.6)(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@4.9.5) + nextra: 4.6.0(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.9.3) react: 18.3.1 + react-compiler-runtime: 19.1.0-rc.3(react@18.3.1) react-dom: 18.3.1(react@18.3.1) scroll-into-view-if-needed: 3.1.0 - zod: 3.25.49 + zod: 4.0.0-beta.20250424T163858 + zustand: 5.0.8(@types/react@19.1.6)(react@18.3.1)(use-sync-external-store@1.6.0(react@18.3.1)) + transitivePeerDependencies: + - '@types/react' + - immer + - use-sync-external-store - nextra@3.3.1(@types/react@19.1.6)(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@4.9.5): + nextra@4.6.0(next@15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.9.3): dependencies: - '@formatjs/intl-localematcher': 0.5.10 + '@formatjs/intl-localematcher': 0.6.2 '@headlessui/react': 2.2.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mdx-js/mdx': 3.1.1 - '@mdx-js/react': 3.1.1(@types/react@19.1.6)(react@18.3.1) - '@napi-rs/simple-git': 0.1.19 - '@shikijs/twoslash': 1.29.2(typescript@4.9.5) - '@theguild/remark-mermaid': 0.1.3(react@18.3.1) + '@napi-rs/simple-git': 0.1.22 + '@shikijs/twoslash': 3.15.0(typescript@5.9.3) + '@theguild/remark-mermaid': 0.3.0(react@18.3.1) '@theguild/remark-npm2yarn': 0.3.3 better-react-mathjax: 2.3.0(react@18.3.1) clsx: 2.1.1 estree-util-to-js: 2.0.0 - estree-util-value-to-estree: 3.4.0 + estree-util-value-to-estree: 3.5.0 + fast-glob: 3.3.3 github-slugger: 2.0.0 - graceful-fs: 4.2.11 - gray-matter: 4.0.3 hast-util-to-estree: 3.1.3 - katex: 0.16.22 + katex: 0.16.25 mdast-util-from-markdown: 2.0.2 mdast-util-gfm: 3.1.0 - mdast-util-to-hast: 13.2.0 + mdast-util-to-hast: 13.2.1 negotiator: 1.0.0 next: 15.5.7(@babel/core@7.24.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - p-limit: 6.2.0 react: 18.3.1 + react-compiler-runtime: 19.1.0-rc.3(react@18.3.1) react-dom: 18.3.1(react@18.3.1) react-medium-image-zoom: 5.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) rehype-katex: 7.0.1 - rehype-pretty-code: 0.14.0(shiki@1.29.2) + rehype-pretty-code: 0.14.1(shiki@3.15.0) rehype-raw: 7.0.0 remark-frontmatter: 5.0.0 remark-gfm: 4.0.1 remark-math: 6.0.0 remark-reading-time: 2.0.2 remark-smartypants: 3.0.2 - shiki: 1.29.2 + server-only: 0.0.1 + shiki: 3.15.0 slash: 5.1.0 title: 4.0.1 + ts-morph: 27.0.2 unist-util-remove: 4.0.0 unist-util-visit: 5.0.0 - yaml: 2.4.5 - zod: 3.25.49 - zod-validation-error: 3.5.4(zod@3.25.49) + unist-util-visit-children: 3.0.0 + yaml: 2.8.1 + zod: 4.0.0-beta.20250424T163858 transitivePeerDependencies: - - '@types/react' - supports-color - typescript @@ -8474,10 +8437,6 @@ snapshots: node-releases@2.0.14: {} - normalize-path@3.0.0: {} - - normalize-range@0.1.2: {} - npm-run-path@5.3.0: dependencies: path-key: 4.0.0 @@ -8490,17 +8449,17 @@ snapshots: object-assign@4.1.1: {} - object-hash@3.0.0: {} - onetime@6.0.0: dependencies: mimic-fn: 4.0.0 - oniguruma-to-es@2.3.0: + oniguruma-parser@0.12.1: {} + + oniguruma-to-es@4.3.4: dependencies: - emoji-regex-xs: 1.0.0 - regex: 5.1.1 - regex-recursion: 5.1.1 + oniguruma-parser: 0.12.1 + regex: 6.0.1 + regex-recursion: 6.0.2 openapi-path-templating@2.2.1: dependencies: @@ -8510,14 +8469,17 @@ snapshots: dependencies: apg-lite: 1.0.5 - p-limit@6.2.0: - dependencies: - yocto-queue: 1.2.2 - - package-json-from-dist@1.0.0: {} - package-manager-detector@1.5.0: {} + pagefind@1.4.0: + optionalDependencies: + '@pagefind/darwin-arm64': 1.4.0 + '@pagefind/darwin-x64': 1.4.0 + '@pagefind/freebsd-x64': 1.4.0 + '@pagefind/linux-arm64': 1.4.0 + '@pagefind/linux-x64': 1.4.0 + '@pagefind/windows-x64': 1.4.0 + parent-module@1.0.1: dependencies: callsites: 3.1.0 @@ -8552,7 +8514,9 @@ snapshots: parse5@7.3.0: dependencies: - entities: 6.0.0 + entities: 6.0.1 + + path-browserify@1.0.1: {} path-data-parser@0.1.0: {} @@ -8562,11 +8526,6 @@ snapshots: path-parse@1.0.7: {} - path-scurry@1.11.1: - dependencies: - lru-cache: 10.4.1 - minipass: 7.1.2 - path-type@4.0.0: {} path-type@5.0.0: {} @@ -8579,9 +8538,7 @@ snapshots: picomatch@2.3.1: {} - pify@2.3.0: {} - - pirates@4.0.6: {} + picomatch@4.0.3: {} pkg-types@1.3.1: dependencies: @@ -8604,54 +8561,12 @@ snapshots: possible-typed-array-names@1.1.0: {} - postcss-import@15.1.0(postcss@8.4.39): - dependencies: - postcss: 8.4.39 - postcss-value-parser: 4.2.0 - read-cache: 1.0.0 - resolve: 1.22.8 - - postcss-js@4.0.1(postcss@8.4.39): - dependencies: - camelcase-css: 2.0.1 - postcss: 8.4.39 - - postcss-load-config@4.0.2(postcss@8.4.39): - dependencies: - lilconfig: 3.1.2 - yaml: 2.4.5 - optionalDependencies: - postcss: 8.4.39 - - postcss-nested@6.0.1(postcss@8.4.39): - dependencies: - postcss: 8.4.39 - postcss-selector-parser: 6.1.0 - - postcss-selector-parser@6.0.10: - dependencies: - cssesc: 3.0.0 - util-deprecate: 1.0.2 - - postcss-selector-parser@6.1.0: - dependencies: - cssesc: 3.0.0 - util-deprecate: 1.0.2 - - postcss-value-parser@4.2.0: {} - postcss@8.4.31: dependencies: nanoid: 3.3.11 picocolors: 1.1.1 source-map-js: 1.2.1 - postcss@8.4.39: - dependencies: - nanoid: 3.3.7 - picocolors: 1.0.1 - source-map-js: 1.2.0 - postcss@8.5.6: dependencies: nanoid: 3.3.11 @@ -8709,6 +8624,10 @@ snapshots: dependencies: safe-buffer: 5.2.1 + react-compiler-runtime@19.1.0-rc.3(react@18.3.1): + dependencies: + react: 18.3.1 + react-copy-to-clipboard@5.1.0(react@18.3.1): dependencies: copy-to-clipboard: 3.3.3 @@ -8781,26 +8700,18 @@ snapshots: dependencies: loose-envify: 1.4.0 - read-cache@1.0.0: - dependencies: - pify: 2.3.0 - - readdirp@3.6.0: - dependencies: - picomatch: 2.3.1 - reading-time@1.5.0: {} recma-build-jsx@1.0.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 estree-util-build-jsx: 3.0.1 vfile: 6.0.3 - recma-jsx@1.0.1(acorn@8.14.1): + recma-jsx@1.0.1(acorn@8.15.0): dependencies: - acorn: 8.14.1 - acorn-jsx: 5.3.2(acorn@8.14.1) + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) estree-util-to-js: 2.0.0 recma-parse: 1.0.0 recma-stringify: 1.0.0 @@ -8808,14 +8719,14 @@ snapshots: recma-parse@1.0.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 esast-util-from-js: 2.0.1 unified: 11.0.5 vfile: 6.0.3 recma-stringify@1.0.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 estree-util-to-js: 2.0.0 unified: 11.0.5 vfile: 6.0.3 @@ -8845,14 +8756,13 @@ snapshots: dependencies: '@babel/runtime': 7.24.7 - regex-recursion@5.1.1: + regex-recursion@6.0.2: dependencies: - regex: 5.1.1 regex-utilities: 2.3.0 regex-utilities@2.3.0: {} - regex@5.1.1: + regex@6.0.1: dependencies: regex-utilities: 2.3.0 @@ -8875,8 +8785,8 @@ snapshots: '@types/katex': 0.16.7 hast-util-from-html-isomorphic: 2.0.0 hast-util-to-text: 4.0.2 - katex: 0.16.22 - unist-util-visit-parents: 6.0.1 + katex: 0.16.25 + unist-util-visit-parents: 6.0.2 vfile: 6.0.3 rehype-parse@9.0.1: @@ -8885,13 +8795,13 @@ snapshots: hast-util-from-html: 2.0.3 unified: 11.0.5 - rehype-pretty-code@0.14.0(shiki@1.29.2): + rehype-pretty-code@0.14.1(shiki@3.15.0): dependencies: '@types/hast': 3.0.4 hast-util-to-string: 3.0.1 parse-numeric-range: 1.3.0 rehype-parse: 9.0.1 - shiki: 1.29.2 + shiki: 3.15.0 unified: 11.0.5 unist-util-visit: 5.0.0 @@ -8903,7 +8813,7 @@ snapshots: rehype-recma@1.0.0: dependencies: - '@types/estree': 1.0.7 + '@types/estree': 1.0.8 '@types/hast': 3.0.4 hast-util-to-estree: 3.1.3 transitivePeerDependencies: @@ -8957,7 +8867,7 @@ snapshots: remark-reading-time@2.0.2: dependencies: estree-util-is-identifier-name: 2.1.0 - estree-util-value-to-estree: 3.4.0 + estree-util-value-to-estree: 3.5.0 reading-time: 1.5.0 unist-util-visit: 3.1.0 @@ -8965,7 +8875,7 @@ snapshots: dependencies: '@types/hast': 3.0.4 '@types/mdast': 4.0.4 - mdast-util-to-hast: 13.2.0 + mdast-util-to-hast: 13.2.1 unified: 11.0.5 vfile: 6.0.3 @@ -9061,11 +8971,6 @@ snapshots: dependencies: compute-scroll-into-view: 3.1.1 - section-matter@1.0.0: - dependencies: - extend-shallow: 2.0.1 - kind-of: 6.0.3 - semver@6.3.1: {} semver@7.7.1: {} @@ -9077,6 +8982,8 @@ snapshots: dependencies: type-fest: 0.20.2 + server-only@0.0.1: {} + set-function-length@1.2.2: dependencies: define-data-property: 1.1.4 @@ -9157,14 +9064,14 @@ snapshots: shebang-regex@3.0.0: {} - shiki@1.29.2: + shiki@3.15.0: dependencies: - '@shikijs/core': 1.29.2 - '@shikijs/engine-javascript': 1.29.2 - '@shikijs/engine-oniguruma': 1.29.2 - '@shikijs/langs': 1.29.2 - '@shikijs/themes': 1.29.2 - '@shikijs/types': 1.29.2 + '@shikijs/core': 3.15.0 + '@shikijs/engine-javascript': 3.15.0 + '@shikijs/engine-oniguruma': 3.15.0 + '@shikijs/langs': 3.15.0 + '@shikijs/themes': 3.15.0 + '@shikijs/types': 3.15.0 '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 @@ -9189,7 +9096,7 @@ snapshots: source-map-js@1.2.1: {} - source-map@0.7.4: {} + source-map@0.7.6: {} space-separated-tokens@2.0.2: {} @@ -9201,33 +9108,11 @@ snapshots: sprintf-js@1.0.3: {} - string-width@4.2.3: - dependencies: - emoji-regex: 8.0.0 - is-fullwidth-code-point: 3.0.0 - strip-ansi: 6.0.1 - - string-width@5.1.2: - dependencies: - eastasianwidth: 0.2.0 - emoji-regex: 9.2.2 - strip-ansi: 7.1.0 - stringify-entities@4.0.4: dependencies: character-entities-html4: 2.1.0 character-entities-legacy: 3.0.0 - strip-ansi@6.0.1: - dependencies: - ansi-regex: 5.0.1 - - strip-ansi@7.1.0: - dependencies: - ansi-regex: 6.0.1 - - strip-bom-string@1.0.0: {} - strip-final-newline@3.0.0: {} style-to-js@1.1.21: @@ -9247,16 +9132,6 @@ snapshots: stylis@4.3.6: {} - sucrase@3.35.0: - dependencies: - '@jridgewell/gen-mapping': 0.3.5 - commander: 4.1.1 - glob: 10.4.4 - lines-and-columns: 1.2.4 - mz: 2.7.0 - pirates: 4.0.6 - ts-interface-checker: 0.1.13 - supports-color@5.5.0: dependencies: has-flag: 3.0.0 @@ -9344,45 +9219,19 @@ snapshots: tabbable@6.3.0: {} - tailwind-merge@2.4.0: {} + tailwind-merge@3.4.0: {} - tailwindcss@3.4.4: - dependencies: - '@alloc/quick-lru': 5.2.0 - arg: 5.0.2 - chokidar: 3.6.0 - didyoumean: 1.2.2 - dlv: 1.1.3 - fast-glob: 3.3.2 - glob-parent: 6.0.2 - is-glob: 4.0.3 - jiti: 1.21.6 - lilconfig: 2.1.0 - micromatch: 4.0.7 - normalize-path: 3.0.0 - object-hash: 3.0.0 - picocolors: 1.0.1 - postcss: 8.4.39 - postcss-import: 15.1.0(postcss@8.4.39) - postcss-js: 4.0.1(postcss@8.4.39) - postcss-load-config: 4.0.2(postcss@8.4.39) - postcss-nested: 6.0.1(postcss@8.4.39) - postcss-selector-parser: 6.1.0 - resolve: 1.22.8 - sucrase: 3.35.0 - transitivePeerDependencies: - - ts-node - - thenify-all@1.6.0: - dependencies: - thenify: 3.3.1 + tailwindcss@4.1.17: {} - thenify@3.3.1: - dependencies: - any-promise: 1.3.0 + tapable@2.3.0: {} tinyexec@1.0.2: {} + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + title@4.0.1: dependencies: arg: 5.0.2 @@ -9429,23 +9278,26 @@ snapshots: ts-dedent@2.2.0: {} - ts-interface-checker@0.1.13: {} - ts-mixer@6.0.4: {} + ts-morph@27.0.2: + dependencies: + '@ts-morph/common': 0.28.1 + code-block-writer: 13.0.3 + ts-toolbelt@9.6.0: {} tslib@2.6.3: {} tslib@2.8.1: {} - twoslash-protocol@0.2.12: {} + twoslash-protocol@0.3.4: {} - twoslash@0.2.12(typescript@4.9.5): + twoslash@0.3.4(typescript@5.9.3): dependencies: - '@typescript/vfs': 1.6.2(typescript@4.9.5) - twoslash-protocol: 0.2.12 - typescript: 4.9.5 + '@typescript/vfs': 1.6.2(typescript@5.9.3) + twoslash-protocol: 0.3.4 + typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -9461,7 +9313,7 @@ snapshots: dependencies: ts-toolbelt: 9.6.0 - typescript@4.9.5: {} + typescript@5.9.3: {} uc.micro@2.1.0: {} @@ -9495,7 +9347,7 @@ snapshots: unist-util-find-after@5.0.0: dependencies: '@types/unist': 3.0.3 - unist-util-is: 6.0.0 + unist-util-is: 6.0.1 unist-util-is@5.2.1: dependencies: @@ -9505,6 +9357,10 @@ snapshots: dependencies: '@types/unist': 3.0.3 + unist-util-is@6.0.1: + dependencies: + '@types/unist': 3.0.3 + unist-util-modify-children@4.0.0: dependencies: '@types/unist': 3.0.3 @@ -9526,8 +9382,8 @@ snapshots: unist-util-remove@4.0.0: dependencies: '@types/unist': 3.0.3 - unist-util-is: 6.0.0 - unist-util-visit-parents: 6.0.1 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 unist-util-stringify-position@4.0.0: dependencies: @@ -9547,6 +9403,11 @@ snapshots: '@types/unist': 3.0.3 unist-util-is: 6.0.0 + unist-util-visit-parents@6.0.2: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + unist-util-visit@3.1.0: dependencies: '@types/unist': 2.0.11 @@ -9565,7 +9426,7 @@ snapshots: dependencies: browserslist: 4.23.1 escalade: 3.1.2 - picocolors: 1.0.1 + picocolors: 1.1.1 url-parse@1.5.10: dependencies: @@ -9576,7 +9437,9 @@ snapshots: dependencies: react: 18.3.1 - util-deprecate@1.0.2: {} + use-sync-external-store@1.6.0(react@18.3.1): + dependencies: + react: 18.3.1 uuid@11.1.0: {} @@ -9590,6 +9453,11 @@ snapshots: '@types/unist': 3.0.3 unist-util-stringify-position: 4.0.0 + vfile-message@4.0.3: + dependencies: + '@types/unist': 3.0.3 + unist-util-stringify-position: 4.0.0 + vfile@6.0.3: dependencies: '@types/unist': 3.0.3 @@ -9637,18 +9505,6 @@ snapshots: wicked-good-xpath@1.3.0: {} - wrap-ansi@7.0.0: - dependencies: - ansi-styles: 4.3.0 - string-width: 4.2.3 - strip-ansi: 6.0.1 - - wrap-ansi@8.1.0: - dependencies: - ansi-styles: 6.2.1 - string-width: 5.1.2 - strip-ansi: 7.1.0 - xml-but-prettier@1.0.1: dependencies: repeat-string: 1.6.1 @@ -9665,7 +9521,7 @@ snapshots: yaml@2.4.5: {} - yocto-queue@1.2.2: {} + yaml@2.8.1: {} youtube-player@5.5.2: dependencies: @@ -9677,10 +9533,14 @@ snapshots: zenscroll@4.0.2: {} - zod-validation-error@3.5.4(zod@3.25.49): + zod@4.0.0-beta.20250424T163858: dependencies: - zod: 3.25.49 + '@zod/core': 0.9.0 - zod@3.25.49: {} + zustand@5.0.8(@types/react@19.1.6)(react@18.3.1)(use-sync-external-store@1.6.0(react@18.3.1)): + optionalDependencies: + '@types/react': 19.1.6 + react: 18.3.1 + use-sync-external-store: 1.6.0(react@18.3.1) zwitch@2.0.4: {} diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml new file mode 100644 index 0000000..6c408ac --- /dev/null +++ b/pnpm-workspace.yaml @@ -0,0 +1,15 @@ +ignoredBuiltDependencies: + - '@scarf/scarf' + - '@vercel/speed-insights' + - core-js + - core-js-pure + - sharp + - tree-sitter + - tree-sitter-json + +onlyBuiltDependencies: + - '@fortawesome/fontawesome-common-types' + - '@fortawesome/fontawesome-svg-core' + - '@fortawesome/free-brands-svg-icons' + - '@fortawesome/free-solid-svg-icons' + - '@tree-sitter-grammars/tree-sitter-yaml' diff --git a/postcss.config.js b/postcss.config.js deleted file mode 100644 index cbfea5e..0000000 --- a/postcss.config.js +++ /dev/null @@ -1,7 +0,0 @@ -module.exports = { - plugins: { - "tailwindcss/nesting": {}, - tailwindcss: {}, - autoprefixer: {}, - }, -}; diff --git a/postcss.config.mjs b/postcss.config.mjs new file mode 100644 index 0000000..c2ddf74 --- /dev/null +++ b/postcss.config.mjs @@ -0,0 +1,5 @@ +export default { + plugins: { + "@tailwindcss/postcss": {}, + }, +}; diff --git a/public/feed.json b/public/feed.json index a5d488c..70ed61c 100644 --- a/public/feed.json +++ b/public/feed.json @@ -1,9 +1,417 @@ { - "version": "https://jsonfeed.org/version/1", - "title": "AuthZed Blog", - "home_page_url": "https://authzed.com", - "feed_url": "https://authzed.com/feed/json", - "description": "The AuthZed blog: Articles from the AuthZed team about SpiceDB, Fine Grained Authorization, Google Zanzibar, and engineering culture.", - "icon": "https://authzed.com/authzed-logo-multi.svg", - "items": [] -} + "version": "https://jsonfeed.org/version/1", + "title": "AuthZed Blog", + "home_page_url": "https://authzed.com", + "feed_url": "https://authzed.com/feed/json", + "description": "The AuthZed blog: Articles from the AuthZed team about SpiceDB, Fine Grained Authorization, Google Zanzibar, and engineering culture.", + "icon": "https://authzed.com/authzed-logo-multi.svg", + "items": [ + { + "id": "https://authzed.com/blog/timeline-mcp-breaches", + "content_html": "

MCP Article #1:

\n

Timeline of MCP Breaches

\n
\n

AI fundamentally changes the interface, but not the fundamentals of security. Read on to find out why

\n
\n

It feels like eons ago when the Model Context Protocol (MCP) was introduced (it was only in November 2024 lol)

\n

It promised to become the USB-C of AI agents — a universal bridge for connecting LLMs to tools, APIs, documents, emails, codebases, databases and cloud infrastructure. In just months, the ecosystem exploded: dozens of tool servers, open-source integrations, host implementations, and hosted MCP registries began to appear.

\n

As the ecosystem rapidly adopted MCP, it presented the classic challenge of securing any new technology: developers connected powerful, sensitive systems without rigorously applying established security controls and fundamental principles to the new spec. By mid-2025, the vulnerabilities were exposed, confirming that the new AI-native world is governed by the same security principles as traditional software.

\n

Below is the first consolidated timeline tracing the major MCP-related breaches and security failures - what happened, what data was exposed, why it happened, and what they reveal about the new threat surface LLMs bring into organisations.

\n

Timeline:

\n

Apr - Jun 2025

\n

1. April 2025 – WhatsApp MCP Exploited: Chat-History Exfiltration

\n
    \n
  • \n

    What happened: Invariant Labs demonstrated that a malicious MCP server could silently exfiltrate a user’s entire WhatsApp history by combining “tool poisoning” with a legitimate whatsapp-mcp server in the same agent. A “random fact of the day” tool morphed into a sleeper backdoor that rewrote how WhatsApp messages are sent. Invariant Labs Link

    \n
  • \n
  • \n

    Data at risk & why: Once the agent read the poisoned tool description, it happily followed hidden instructions to send hundreds or thousands of past WhatsApp messages (personal chats, business deals, customer data) to an attacker-controlled phone number – all disguised as ordinary outbound messages, bypassing typical Data Loss Prevention (DLP) tooling.

    \n
  • \n
\n

2. May 2025 – GitHub MCP “Prompt Injection Data Heist”

\n
    \n
  • \n

    What happened: Invariant Labs uncovered a prompt-injection attack against the official GitHub MCP server: a malicious public GitHub issue could hijack an AI assistant and make it pull data from private repos, then leak that data back to a public repo. Invariant Labs link

    \n
  • \n
  • \n

    Data breached & why: With a single over-privileged Personal Access Token wired into the MCP server, the compromised agent exfiltrated private repository contents, internal project details, and even personal financial/salary information into a public pull request. The root cause was broad PAT scopes combined with untrusted content (issues) in the LLM context, letting a prompt-injected agent abuse legitimate MCP tool calls.

    \n
  • \n
\n

3. June 2025: Asana MCP Server Bug

\n
    \n
  • \n

    What happened: Asana discovered a bug in its MCP-server feature that could allow data belonging to one organisation to be seen by other organisations using their system. Upguard link.

    \n
  • \n
  • \n

    Data breached & why: Projects, teams, tasks and other Asana objects belonging to one customer potentially accessible by a different customer. This was caused by a logic flaw in the access control of their MCP-enabled integration (cross-tenant access not properly isolated).

    \n
  • \n
\n

4. June 2025 – Anthropic MCP Inspector RCE

\n
    \n
  • \n

    What happened: Researchers found that Anthropic’s MCP Inspector developer tool allowed unauthenticated remote code execution via its inspector–proxy architecture. An attacker could get arbitrary commands run on a dev machine just by having the victim inspect a malicious MCP server, or even by driving the inspector from a browser. CVE Link

    \n
  • \n
  • \n

    Data at risk & why: Because the inspector ran with the user’s privileges and lacked authentication while listening on localhost / 0.0.0.0, a successful exploit could expose the entire filesystem, API keys, and environment secrets on the developer workstation – effectively turning a debugging tool into a remote shell. VSec Medium Link

    \n
  • \n
\n
\n

Jul - Sept 2025

\n

1. July 2025 – mcp-remote OS Command Injection

\n
    \n
  • \n

    What happened: JFrog disclosed CVE-2025-6514, a critical OS command-injection bug in mcp-remote, a popular OAuth proxy for connecting local MCP clients to remote servers. Malicious MCP servers could send a booby-trapped authorization_endpoint that mcp-remote passed straight into the system shell, achieving remote code execution on the client machine. CVE Link

    \n
  • \n
  • \n

    Data at risk & why: With over 437,000 downloads and adoption in Cloudflare, Hugging Face, Auth0 and other integration guides, the vuln effectively turned any unpatched install into a supply-chain backdoor: an attacker could execute arbitrary commands, steal API keys, cloud credentials, local files, SSH keys, and Git repo contents, all triggered by pointing your LLM host at a malicious MCP endpoint. Docker Blog

    \n
  • \n
\n

2. August 2025: Anthropic “Filesystem MCP Server” Vulnerabilities

\n
    \n
  • \n

    What happened: Security researchers found two critical flaws in Anthropic’s Filesystem-MCP server: sandbox escape and symlink/containment bypass, enabling arbitrary file access and code execution. Cymulate Link

    \n
  • \n
  • \n

    Data breached & why: Host filesystem access, meaning sensitive files, credentials, logs, or other data on servers could be impacted. The root cause was poor sandbox implementation and insufficient directory-containment enforcement in the MCP server’s file-tool interface.

    \n
  • \n
\n

3. September 2025: Malicious MCP Server in the Wild

\n
    \n
  • \n

    What happened: A malicious MCP server package masquerading as a legitimate “Postmark MCP Server” was found injecting BCC copies of all email communications (including confidential docs) to an attacker’s server. IT Pro

    \n
  • \n
  • \n

    Data breached & why: Emails, internal memos, invoices — essentially all mail traffic processed by that MCP server were exposed. This was due to a supply-chain compromise / malicious package in MCP ecosystem, and the fact that MCP servers often run with high-privilege accesses which were exploited.

    \n
  • \n
\n
\n

Oct - Dec 2025

\n

1. October 2025 – Smithery MCP Hosting Supply-Chain Breach

\n
    \n
  • \n

    What happened: While researching Smithery’s hosted MCP server platform, GitGuardian found a path-traversal bug in the smithery.yaml build config. By setting dockerBuildPath: \"..\", attackers could make the registry build Docker images from the builder’s home directory, then exfiltrate its contents and credentials. GitGuardian Blog

    \n
  • \n
  • \n

    Data breached & why: The exploit leaked the builder’s ~/.docker/config.json, including a Fly.io API token that granted control over >3,000 apps, most of them hosted MCP servers. From there, attackers could run arbitrary commands in MCP server containers and tap inbound client traffic that contained API keys and other secrets for downstream services (e.g. Brave API keys), turning the MCP hosting service itself into a high-impact supply-chain compromise.

    \n
  • \n
\n\n
    \n
  • \n

    What happened: A command-injection flaw was discovered in the Figma/Framelink MCP integration: unsanitised user input in shell commands could lead to remote code execution. The Hacker News Link

    \n
  • \n
  • \n

    Data breached & why: Because the integration allowed AI-agents to interact with Figma docs, the flaw could enable attackers to run arbitrary commands through the MCP tooling and access design data or infrastructure. The root cause was the unsafe use of child_process.exec with untrusted input in the MCP server code - essentially a lack of input sanitisation.CVE Link

    \n
  • \n
\n

..And we’re sure there are more to come. We’ll keep this blog updated with the latest in security and data breaches in the MCP world.

\n
\n

Patterns Emerging Across Incidents

\n

Across all these breaches, common themes appear:

\n

1. Local AI dev tools behave like exposed remote APIs

\n

MCP Inspector, mcp-remote, and similar tooling turned into Remote Code Execution (RCE) surfaces simply by trusting localhost connections.

\n

2. Over-privileged API tokens are catastrophic in MCP workflows

\n

GitHub MCP, Smithery, and WhatsApp attacks all exploited overly broad token scopes.

\n

3. “Tool poisoning” is a new, AI-native supply chain vector

\n

Traditional security tools don’t monitor changes to MCP tool descriptions.

\n

4. Hosted MCP registries concentrate risk

\n

Smithery illustrated what happens when thousands of tenants rely on a single build pipeline.

\n

5. Prompt injection becomes a full data breach

\n

The GitHub MCP incident demonstrated how natural language alone can cause exfiltration when MCP calls are available.

\n
\n

Conclusion:

\n

The Model Context Protocol (MCP) presents a cutting-edge threat surface, yet the breaches detailed here are rooted in timeless flaws: over-privilege, inadequate input validation, and insufficient isolation.

\n

AI fundamentally changes the interface, but not the fundamentals of security. To secure the AI era, we must rigorously apply old-school principles of least privilege and zero-trust to these powerful new software components.

\n

As adoption accelerates, organisations must treat MCP surfaces with the same seriousness as API gateways, CI/CD pipelines, and Cloud IAM.

\n

Because attackers already are.

", + "url": "https://authzed.com/blog/timeline-mcp-breaches", + "title": "A Timeline of Model Context Protocol (MCP) Security Breaches", + "summary": "AI fundamentally changes the interface, but not the fundamentals of security. Here's a timeline of security breaches in MCP Servers from the recent past.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-11-25T18:18:00.000Z", + "date_published": "2025-11-25T18:18:00.000Z", + "author": { + "name": "Sohan Maheshwar", + "url": "https://www.linkedin.com/in/sohanmaheshwar/" + } + }, + { + "id": "https://authzed.com/blog/building-a-multi-tenant-rag-with-fine-grain-authorization-using-motia-and-spicedb", + "content_html": "
\n

Learn how to build a complete retrieval-augmented generation pipeline with multi-tenant authorization using Motia's event-driven framework, OpenAI embeddings, Pinecone vector search, SpiceDB permissions, and natural language querying.

\n
\n

If I was hard-pressed to pick my favourite computer game of all time, I'd go with Stardew Valley (sorry, Dangerous Dave). The stats from my Nintendo Profile is all the proof you need:

\n

\"nintendo-stats\"

\n

Stardew Valley sits atop with 430 hours played and in second place is Mario Kart (not pictured) with ~45 hours played. That's a significant difference, and should indicate how much I adore this game.

\n

We've been talking about the importance of Fine-Grained Authorization and RAG recently, so when I sat down to build a sample usecase for a production-grade RAG with Fine-Grained Permissions, my immediate thought went to Stardew Valley.

\n

For those not familiar, Stardew Valley is a farm life simulation game where players manage a farm by clearing land, growing seasonal crops, and raising animals. So I thought I could build a logbook for a large farm that one could query using natural language processing. This usecase is ideal for RAG Pipelines (a technique that uses external data to improve the accuracy, relevancy, and usefulness of a LLM model’s output).

\n

I focused on building something that was as close to production-grade as possible (and perhaps strayed from the original intent of a single farm) where an organization can own farms and data from the farms. The farms contain harvest data and users can log and query data for the farms they're part of. This provides a sticky situation for the authorization model. How does a LLM know who has access to what data?

\n

Here's where SpiceDB and ReBAC was vital. By using metadata to indicate where the relevant embedings came from, the RAG system returned harvest data to the user only based on what data they had access to. In fact, OpenAI uses SpiceDB for their fine-grained authorization in ChatGPT Connectors using similar techniques.

\n

While I know my way around SpiceDB and authorization, I needed help to build out the other components for a production-grade harvest logbook. So I reached out to my friend Rohit Ghumare from Motia for his expertise. Motia.dev is a backend framework that unifies APIs, background jobs, workflows, and AI Agents into a single core primitive with built-in observability and state management

\n

Here's a photo of Rohit and myself at Kubecon Europe in 2025

\n

\"sohan

\n

What follows below is a tutorial-style post on building a Retrieval Augmented Generation system with fine-grained authorization using the Motia framework and SpiceDB. We'll use Pinecone as our vector database, and OpenAI as our LLM.

\n

What You'll Build

\n

In this tutorial, you'll create a complete RAG system with authorization that:

\n
    \n
  • Stores harvest data and automatically generates embeddings for semantic search
  • \n
  • Splits text into optimized chunks with overlap for better retrieval accuracy
  • \n
  • Implements fine-grained authorization using SpiceDB's relationship-based access control
  • \n
  • Queries harvest history using natural language with AI-powered responses
  • \n
  • Returns contextually relevant answers with source citations from vector search
  • \n
  • Supports multi-tenant access where users only see data they have permission to access
  • \n
  • Logs all queries and responses for audit trails in CSV or Google Sheets
  • \n
  • Runs as an event-driven workflow orchestrated through Motia's framework
  • \n
\n

By the end of the tutorial, you'll have a complete system that combines semantic search with multi-tenant authorization.

\n

Prerequisites

\n

Before starting the tutorial, ensure you have:

\n\n

Getting Started

\n

1. Create Your Motia Project

\n

Create a new Motia project using the CLI:

\n
npx motia@latest create\n
\n

The installer will prompt you:

\n
    \n
  1. Template: Select Base (TypeScript)
  2. \n
  3. Project name: Enter harvest-logbook-rag
  4. \n
  5. Proceed? Type Yes
  6. \n
\n

Navigate into your project:

\n
cd harvest-logbook-rag\n
\n

Your initial project structure:

\n
harvest-logbook-rag/\n├── src/\n│   └── services/\n│       └── pet-store/\n├── steps/\n│   └── petstore/\n├── .env\n└── package.json\n
\n

The default template includes a pet store example. We'll replace this with our harvest logbook system. For more on Motia basics, see the Quick Start guide.

\n

2. Install Dependencies

\n

Install the SpiceDB client for authorization:

\n
npm install @authzed/authzed-node\n
\n

This is the only additional package needed.

\n

3. Setup Pinecone

\n

Pinecone will store the vector embeddings for semantic search.

\n

Create a Pinecone Account

\n
    \n
  1. Go to app.pinecone.io and sign up
  2. \n
  3. Create a new project
  4. \n
\n

Create an Index

\n
    \n
  1. \n

    Click Create Index

    \n
  2. \n
  3. \n

    Configure:

    \n
      \n
    • Name: harvest-logbook (or your preference)
    • \n
    • Dimensions: 1536 (for OpenAI embeddings)
    • \n
    • Metric: cosine
    • \n
    \n
  4. \n
  5. \n

    Click Create Index

    \n
  6. \n
\n

Get Your Credentials

\n
    \n
  1. Go to API Keys in the sidebar
  2. \n
  3. Copy your API Key
  4. \n
  5. Go back to your index
  6. \n
  7. Click the Connect tab
  8. \n
  9. Copy the Host (looks like: your-index-abc123.svc.us-east-1.pinecone.io)
  10. \n
\n

Save these for the next step.

\n

4. Setup SpiceDB

\n

SpiceDB handles authorization and access control for the system.

\n

Start SpiceDB with Docker

\n

Run this command to start SpiceDB locally:

\n
docker run -d \\\n  --name spicedb \\\n  -p 50051:50051 \\\n  authzed/spicedb serve \\\n  --grpc-preshared-key \"sometoken\"\n
\n

Verify SpiceDB is Running

\n

Check that the container is running:

\n
docker ps | grep spicedb\n
\n

You should see output similar to:

\n
6316f6cb50b4   authzed/spicedb   \"spicedb serve --grp…\"   31 seconds ago   Up 31 seconds   0.0.0.0:50051->50051/tcp   spicedb\n
\n

SpiceDB is now running on localhost:50051 and ready to handle authorization checks.

\n

5. Configure Environment Variables

\n

Create a .env file in the project root:

\n
# OpenAI (Required for embeddings and chat)\nOPENAI_API_KEY=sk-proj-xxxxxxxxxxxxx\n\n# Pinecone (Required for vector storage)\nPINECONE_API_KEY=pcsk_xxxxxxxxxxxxx\nPINECONE_INDEX_HOST=your-index-abc123.svc.us-east-1.pinecone.io\n\n# SpiceDB (Required for authorization)\nSPICEDB_ENDPOINT=localhost:50051\nSPICEDB_TOKEN=sometoken\n\n# LLM Configuration (OpenAI is default)\nUSE_OPENAI_CHAT=true\n\n# Logging Configuration (CSV is default)\nUSE_CSV_LOGGER=true\n
\n

Replace the placeholder values with your actual credentials from the previous steps.

\n

6. Initialize SpiceDB Schema

\n

SpiceDB needs a schema that defines the authorization model for organizations, farms, and users.

\n

Create the Schema File

\n

Create src/services/harvest-logbook/spicedb.schema with the authorization model. A SpiceDB schema defines the types of objects found your application, how those objects can relate to one another, and the permissions that can be computed off of those relations.

\n

Here's a snippet of the schema that defines user, organization and farm and the relations and permissions between them.

\n
definition user {}\n\ndefinition organization {\n    relation admin: user\n    relation member: user\n    \n    permission view = admin + member\n    permission edit = admin + member\n    permission query = admin + member\n    permission manage = admin\n}\n\ndefinition farm {\n    relation organization: organization\n    relation owner: user\n    relation editor: user\n    relation viewer: user\n    \n    permission view = viewer + editor + owner + organization->view\n    permission edit = editor + owner + organization->edit\n    permission query = viewer + editor + owner + organization->query\n    permission manage = owner + organization->admin\n}\n
\n

View the complete schema on GitHub

\n

The schema establishes:

\n
    \n
  • Organizations with admins and members
  • \n
  • Farms with owners, editors, and viewers
  • \n
  • Harvest entries linked to farms
  • \n
  • Permission inheritance (org members can access farms in their org)
  • \n
\n

Create Setup Scripts

\n

Create a scripts/ folder and add three files:

\n

scripts/setup-spicedb-schema.ts - Reads the schema file and writes it to SpiceDB
\nView on GitHub

\n

scripts/verify-spicedb-schema.ts - Verifies the schema was written correctly
\nView on GitHub

\n

scripts/create-sample-permissions.ts - Creates sample users and permissions for testing
\nView on GitHub

\n

Install Script Runner

\n
npm install -D tsx\n
\n

Add Scripts to package.json

\n
\"scripts\": {\n  \"spicedb:setup\": \"tsx scripts/setup-spicedb-schema.ts\",\n  \"spicedb:verify\": \"tsx scripts/verify-spicedb-schema.ts\",\n  \"spicedb:sample\": \"tsx scripts/create-sample-permissions.ts\"\n}\n
\n

Run the Setup

\n
# Write schema to SpiceDB\nnpm run spicedb:setup\n
\n

You should see output confirming the schema was written successfully:\n\"image\"

\n

Verify it was written correctly:

\n
npm run spicedb:verify\n
\n

This displays the complete authorization schema showing all definitions and permissions:\n\"image\"

\n

The output shows:

\n
    \n
  • farm definition with owner/editor/viewer roles
  • \n
  • harvest_entry definition linked to farms
  • \n
  • organization definition with admin/member roles
  • \n
  • query_session definition for RAG queries
  • \n
  • Permission rules for each resource type
  • \n
\n

Create sample user (user_alice as owner of farm_1):

\n
npm run spicedb:sample\n
\n

\"image\"

\n

This creates user_alice as owner of farm_1, ready for testing.

\n

Your authorization system is now ready.

\n

7. Start Development Server

\n

Start the Motia development server:

\n
npm run dev\n
\n

The server starts at http://localhost:3000. Open this URL in your browser to see the Motia Workbench.

\n

You'll see the default pet store example. We'll replace this with our harvest logbook system in the next sections.

\n

\"image\"

\n

Your development environment is now ready. All services are connected:

\n
    \n
  • Motia running on localhost:3000
  • \n
  • Pinecone index created and connected
  • \n
  • SpiceDB running with schema loaded
  • \n
  • Sample permissions created (user_alice owns farm_1)
  • \n
\n

Exploring the Project

\n

Before we start building, let's understand the architecture we're creating.

\n

System Architecture

\n
┌─────────────────────────────────────────────────────────────┐\n│  POST /harvest_logbook                                      │\n│  (Store harvest data + optional query)                      │\n└─────────┬───────────────────────────────────────────────────┘\n          │\n          ├─→ Authorization Middleware (SpiceDB)\n          │   - Check user has 'edit' permission on farm\n          │\n          ├─→ ReceiveHarvestData Step (API)\n          │   - Validate input\n          │   - Emit events\n          │\n          ├─→ ProcessEmbeddings Step (Event)\n          │   - Split text into chunks (400 chars, 40 overlap)\n          │   - Generate embeddings (OpenAI)\n          │   - Store vectors (Pinecone)\n          │\n          └─→ QueryAgent Step (Event) [if query provided]\n              - Retrieve similar content (Pinecone)\n              - Generate response (OpenAI/HuggingFace)\n              - Emit logging event\n              │\n              └─→ LogToSheets Step (Event)\n                  - Log query & response (CSV/Sheets)\n
\n

The RAG Pipeline

\n

Our system processes harvest data through these stages:

\n
    \n
  1. API Entry - Receive harvest data via REST endpoint
  2. \n
  3. Text Chunking - Split content into overlapping chunks (400 chars, 40 overlap)
  4. \n
  5. Embedding Generation - Convert chunks to vectors using OpenAI
  6. \n
  7. Vector Storage - Store embeddings in Pinecone for semantic search
  8. \n
  9. Query Processing - Search vectors and generate AI responses
  10. \n
  11. Audit Logging - Log all queries and responses
  12. \n
\n

Event-Driven Architecture

\n

The system uses Motia's event-driven model:

\n
    \n
  • API Steps handle HTTP requests
  • \n
  • Event Steps process background tasks
  • \n
  • Steps communicate by emitting and subscribing to events
  • \n
  • Each step is independent and can be tested separately
  • \n
\n

Authorization Layer

\n

Every API request passes through SpiceDB authorization:

\n
    \n
  • Users have relationships with resources (owner, editor, viewer)
  • \n
  • Permissions are checked before processing requests
  • \n
  • Multi-tenant by design (users only access their farms)
  • \n
\n

What We'll Build

\n

We'll create five main steps:

\n
    \n
  1. ReceiveHarvestData - API endpoint to store harvest entries
  2. \n
  3. ProcessEmbeddings - Event handler for generating and storing embeddings
  4. \n
  5. QueryAgent - Event handler for AI-powered queries
  6. \n
  7. QueryOnly - Separate API endpoint for querying without storing data
  8. \n
  9. LogToSheets - Event handler for audit logging
  10. \n
\n

Each component is a single file in the steps/ directory. Motia automatically discovers and connects them based on the events they emit and subscribe to.

\n

Step 1: Create the Harvest Entry API

\n

What We're Building

\n

In this step, we'll create an API endpoint that receives harvest log data and triggers the processing pipeline. This is the entry point that starts the entire RAG workflow.

\n

Why This Step Matters

\n

Every workflow needs an entry point. In Motia, API steps serve as the gateway between external requests and your event-driven system. By using Motia's api step type, you get automatic HTTP routing, request validation, and event emission, all without writing boilerplate server code. When a farmer calls this endpoint with their harvest data, it validates the input, checks authorization, stores the entry, and emits events that trigger the embedding generation and optional query processing.

\n

Create the Step File

\n

Create a new file at steps/harvest-logbook/receive-harvest-data.step.ts.

\n
\n

The complete source code for all steps is available on GitHub. You can reference the working implementation at any time.

\n
\n

View the complete Step 1 code on GitHub →

\n

\"image\"

\n

Now let's understand the key parts you'll be implementing:

\n

Input Validation

\n
const bodySchema = z.object({\n  content: z.string().min(1, 'Content cannot be empty'),\n  farmId: z.string().min(1, 'Farm ID is required for authorization'),\n  metadata: z.record(z.any()).optional(),\n  query: z.string().optional()\n});\n
\n

Zod validates that requests include the harvest content and farm ID. The query field is optional - if provided, the system will also answer a natural language question about the data after storing it.

\n

Step Configuration

\n
export const config: ApiRouteConfig = {\n  type: 'api',\n  name: 'ReceiveHarvestData',\n  path: '/harvest_logbook',\n  method: 'POST',\n  middleware: [errorHandlerMiddleware, harvestEntryEditMiddleware],\n  emits: ['process-embeddings', 'query-agent'],\n  bodySchema\n};\n
\n
    \n
  • type: 'api' makes this an HTTP endpoint
  • \n
  • middleware runs authorization checks before the handler
  • \n
  • emits declares this step triggers embedding processing and optional query events
  • \n
  • Motia handles all the routing automatically
  • \n
\n

Authorization Check

\n
middleware: [errorHandlerMiddleware, harvestEntryEditMiddleware]\n
\n

The harvestEntryEditMiddleware checks SpiceDB to ensure the user has edit permission on the specified farm. If authorization fails, the request is rejected before reaching the handler. Authorization info is added to the request for use in the handler.

\n

View authorization middleware →

\n

Handler Logic

\n
export const handler: Handlers['ReceiveHarvestData'] = async (req, { emit, logger, state }) => {\n  const { content, farmId, metadata, query } = bodySchema.parse(req.body);\n  const entryId = `harvest-${Date.now()}`;\n  \n  // Store entry data in state\n  await state.set('harvest-entries', entryId, {\n    content, farmId, metadata, timestamp: new Date().toISOString()\n  });\n  \n  // Emit event to process embeddings\n  await emit({\n    topic: 'process-embeddings',\n    data: { entryId, content, metadata }\n  });\n};\n
\n

The handler generates a unique entry ID, stores the data in Motia's state management, and emits an event to trigger embedding processing. If a query was provided, it also emits a query-agent event.

\n

Event Emission

\n
await emit({\n  topic: 'process-embeddings',\n  data: { entryId, content, metadata: { ...metadata, farmId, userId } }\n});\n\nif (query) {\n  await emit({\n    topic: 'query-agent',\n    data: { entryId, query }\n  });\n}\n
\n

Events are how Motia steps communicate. The process-embeddings event triggers the next step to chunk the text and generate embeddings. If a query was provided, the query-agent event runs in parallel to answer the question using RAG.

\n

This keeps the API response fast as it returns immediately while processing happens in the background.

\n

Test the Step

\n

Open the Motia Workbench and test this endpoint:

\n
    \n
  1. Click on the harvest-logbook flow
  2. \n
  3. Find POST /harvest_logbook in the sidebar
  4. \n
  5. Click on it to open the request panel
  6. \n
  7. Switch to the Headers tab and add:
  8. \n
\n
   {\n     \"x-user-id\": \"user_alice\"\n   }\n
\n
    \n
  1. Switch to the Body tab and add:
  2. \n
\n
   {\n     \"content\": \"Harvested 500kg of tomatoes from field A. Weather was sunny.\",\n     \"farmId\": \"farm_1\",\n     \"metadata\": {\n       \"field\": \"A\",\n       \"crop\": \"tomatoes\"\n     }\n   }\n
\n
    \n
  1. Click Send button.
  2. \n
\n

You should see a success response with the entry ID. The Workbench will show the workflow executing in real-time, with events flowing to the next steps.

\n

\n

Step 2: Process Embeddings

\n

What We're Building

\n

This event handler takes the harvest data from Step 1, splits it into chunks, generates vector embeddings, and stores them in Pinecone for semantic search.

\n

Why This Step Matters

\n

RAG systems need to break down large text into smaller chunks for better retrieval accuracy. By chunking text with overlap and generating embeddings for each piece, we enable semantic search that finds relevant context even when queries don't match exact keywords.

\n

This step runs in the background after the API returns, keeping the user experience fast while handling the background work of embedding generation and vector storage.

\n

Create the Step File

\n

Create a new file at steps/harvest-logbook/process-embeddings.step.ts.

\n

View the complete Step 2 code on GitHub →

\n

Now let's understand the key parts you'll be implementing:

\n

Input Schema

\n
const inputSchema = z.object({\n  entryId: z.string(),\n  content: z.string(),\n  metadata: z.record(z.any()).optional()\n});\n
\n

This step receives the entry ID, content, and metadata from the previous step's event emission.

\n

Step Configuration

\n
export const config: EventConfig = {\n  type: 'event',\n  name: 'ProcessEmbeddings',\n  subscribes: ['process-embeddings'],\n  emits: [],\n  input: inputSchema\n};\n
\n
    \n
  • type: 'event' makes this a background event handler
  • \n
  • subscribes: ['process-embeddings'] listens for events from Step 1
  • \n
  • No emits - this is the end of the embedding pipeline
  • \n
\n

Text Chunking

\n
const vectorIds = await HarvestLogbookService.storeEntry({\n  id: entryId,\n  content,\n  metadata,\n  timestamp: new Date().toISOString()\n});\n
\n

The service handles text splitting (400 character chunks with 40 character overlap), embedding generation via OpenAI, and storage in Pinecone. This chunking strategy ensures semantic continuity across chunks.

\n

View text splitter service →

\n

Embedding Generation

\n

The OpenAI service generates 1536-dimension embeddings for each text chunk using the text-embedding-ada-002 model.

\n

View OpenAI service →

\n

Vector Storage

\n
await state.set('harvest-vectors', entryId, {\n  vectorIds,\n  processedAt: new Date().toISOString(),\n  chunkCount: vectorIds.length\n});\n
\n

After storing vectors in Pinecone, the step updates Motia's state with the vector IDs for tracking. Each chunk gets a unique ID like harvest-123-chunk-0, harvest-123-chunk-1, etc.

\n

View Pinecone service →

\n

The embeddings are now stored and ready for semantic search when users query the system.

\n

Test the Step

\n

Step 2 runs automatically when Step 1 emits the process-embeddings event. To test it:

\n
    \n
  1. \n

    Send a request to the POST /harvest_logbook endpoint (from Step 1)

    \n
  2. \n
  3. \n

    In the Workbench, watch the workflow visualization

    \n
  4. \n
  5. \n

    You'll see the ProcessEmbeddings step activate automatically

    \n
  6. \n
  7. \n

    Check the Logs tab at the bottom to see:

    \n
      \n
    • Text chunking progress
    • \n
    • Embedding generation
    • \n
    • Vector storage confirmation
    • \n
    \n
  8. \n
\n

The step completes when you see \"Successfully stored embeddings\" in the logs. The vectors are now in Pinecone and ready for semantic search.

\n

\n

Step 3: Query Agent

\n

What We're Building

\n

This event handler performs the RAG query, it searches Pinecone for relevant content, retrieves matching chunks, and uses an LLM to generate natural language responses based on the retrieved context.

\n

Why This Step Matters

\n

This is where retrieval-augmented generation happens. Instead of the LLM generating responses from its training data alone, it uses actual harvest data from Pinecone as context. This ensures accurate, source-backed answers specific to the user's farm data.

\n

The step supports both OpenAI and HuggingFace LLMs, giving you flexibility in choosing your AI provider based on cost and performance needs.

\n

Create the Step File

\n

Create a new file at steps/harvest-logbook/query-agent.step.ts.

\n

View the complete Step 3 code on GitHub →

\n

Now let's understand the key parts you'll be implementing:

\n

Input Schema

\n
const inputSchema = z.object({\n  entryId: z.string(),\n  query: z.string(),\n  conversationHistory: z.array(z.object({\n    role: z.enum(['user', 'assistant', 'system']),\n    content: z.string()\n  })).optional()\n});\n
\n

The step receives the query text and optional conversation history for multi-turn conversations.

\n

Step Configuration

\n
export const config: EventConfig = {\n  type: 'event',\n  name: 'QueryAgent',\n  subscribes: ['query-agent'],\n  emits: ['log-to-sheets'],\n  input: inputSchema\n};\n
\n
    \n
  • subscribes: ['query-agent'] listens for query events from Step 1
  • \n
  • emits: ['log-to-sheets'] triggers logging after generating response
  • \n
\n

RAG Query Process

\n
const agentResponse = await HarvestLogbookService.queryWithAgent({\n  query,\n  conversationHistory\n});\n
\n

The service orchestrates the RAG pipeline: embedding the query, searching Pinecone for similar vectors, extracting context from top matches, and generating a response using the LLM.

\n

View RAG orchestration service →

\n

Vector Search

\n

The query is embedded using OpenAI and searched against Pinecone to find the top 5 most similar chunks. Each result includes a similarity score and the original text.

\n

View Pinecone query implementation →

\n

LLM Response Generation

\n
await state.set('agent-responses', entryId, {\n  query,\n  response: agentResponse.response,\n  sources: agentResponse.sources,\n  timestamp: agentResponse.timestamp\n});\n
\n

The LLM generates a response using the retrieved context. The system supports both OpenAI (default) and HuggingFace, controlled by the USE_OPENAI_CHAT environment variable. The response includes source citations showing which harvest entries informed the answer.

\n

View OpenAI chat service →
\nView HuggingFace service →

\n

Event Emission

\n
await emit({\n  topic: 'log-to-sheets',\n  data: {\n    entryId,\n    query,\n    response: agentResponse.response,\n    sources: agentResponse.sources\n  }\n});\n
\n

After generating the response, the step emits a logging event to create an audit trail of all queries and responses.

\n

Test the Step

\n

Step 3 runs automatically when you include a query field in the Step 1 request. To test it:

\n
    \n
  1. Send a request to POST /harvest_logbook with a query:
  2. \n
\n
   {\n     \"content\": \"Harvested 500kg of tomatoes from field A. Weather was sunny.\",\n     \"farmId\": \"farm_1\",\n     \"query\": \"What crops did we harvest?\"\n   }\n
\n
    \n
  1. \n

    In the Workbench, watch the QueryAgent step activate

    \n
  2. \n
  3. \n

    Check the Logs tab to see:

    \n
      \n
    • Query embedding generation
    • \n
    • Vector search in Pinecone
    • \n
    • LLM response generation
    • \n
    • Source citations
    • \n
    \n
  4. \n
\n

The step completes when you see the AI-generated response in the logs. The query and response are automatically logged by Step 5.

\n

\n

Step 4: Query-Only Endpoint

\n

What We're Building

\n

This API endpoint allows users to query their existing harvest data without storing new entries. It's a separate endpoint dedicated purely to RAG queries.

\n

Why This Step Matters

\n

While Step 1 handles both storing and optionally querying data, users often need to just ask questions about their existing harvest logs. This dedicated endpoint keeps the API clean and focused - one endpoint for data entry, another for pure queries.

\n

This separation also makes it easier to apply different rate limits or permissions between data modification and read-only operations.

\n

Create the Step File

\n

Create a new file at steps/harvest-logbook/query-only.step.ts.

\n

View the complete Step 4 code on GitHub →

\n

Now let's understand the key parts you'll be implementing:

\n

Input Validation

\n
const bodySchema = z.object({\n  query: z.string().min(1, 'Query cannot be empty'),\n  farmId: z.string().min(1, 'Farm ID is required for authorization'),\n  conversationHistory: z.array(z.object({\n    role: z.enum(['user', 'assistant', 'system']),\n    content: z.string()\n  })).optional()\n});\n
\n

The request requires a query and farm ID. Conversation history is optional for multi-turn conversations.

\n

Step Configuration

\n
export const config: ApiRouteConfig = {\n  type: 'api',\n  name: 'QueryHarvestLogbook',\n  path: '/harvest_logbook/query',\n  method: 'POST',\n  middleware: [errorHandlerMiddleware, harvestQueryMiddleware],\n  emits: ['query-agent']\n};\n
\n
    \n
  • path: '/harvest_logbook/query' creates a dedicated query endpoint
  • \n
  • harvestQueryMiddleware checks for query permission (not edit)
  • \n
  • emits: ['query-agent'] triggers the same RAG query handler as Step 3
  • \n
\n

Authorization Middleware

\n
middleware: [errorHandlerMiddleware, harvestQueryMiddleware]\n
\n

The harvestQueryMiddleware checks SpiceDB for query permission. This is less restrictive than edit - viewers can query but cannot modify data.

\n

View authorization middleware →

\n

Handler Logic

\n
export const handler: Handlers['QueryHarvestLogbook'] = async (req, { emit, logger }) => {\n  const { query, farmId } = bodySchema.parse(req.body);\n  const queryId = `query-${Date.now()}`;\n  \n  await emit({\n    topic: 'query-agent',\n    data: { entryId: queryId, query }\n  });\n  \n  return {\n    status: 200,\n    body: { success: true, queryId }\n  };\n};\n
\n

The handler generates a unique query ID and emits the same query-agent event used in Step 1. This reuses the RAG pipeline from Step 3 without duplicating code.

\n

The API returns immediately with the query ID. The actual processing happens in the background, and results are logged by Step 5.

\n

Test the Step

\n

This is the dedicated query endpoint. Test it directly:

\n
    \n
  1. Click on POST /harvest_logbook/query in the Workbench
  2. \n
  3. Add the header:
  4. \n
\n
   {\n     \"x-user-id\": \"user_alice\"\n   }\n
\n
    \n
  1. Add the body:
  2. \n
\n
   {\n     \"query\": \"What crops did we harvest?\",\n     \"farmId\": \"farm_1\"\n   }\n
\n
    \n
  1. Click Send
  2. \n
\n

You'll see a 200 OK response with the query ID. In the Logs tab, watch for:

\n
    \n
  • QueryHarvestLogbook - Authorization and query received
  • \n
  • QueryAgent - Querying AI agent
  • \n
  • QueryAgent - Agent query completed
  • \n
\n

The query runs in the background and results are logged by Step 5. This endpoint is perfect for read-only query operations without storing new data.

\n

\n

Step 5: Log to Sheets

\n

What We're Building

\n

This event handler creates an audit trail by logging every query and its AI-generated response. It supports both local CSV files (for development) and Google Sheets (for production).

\n

Why This Step Matters

\n

Audit logs are essential for understanding how users interact with your system. They help with debugging, monitoring usage patterns, and maintaining compliance. By logging queries and responses, you can track what questions users ask, identify common patterns, and improve the system over time.

\n

The dual logging strategy (CSV/Google Sheets) gives you flexibility, use CSV locally for quick testing, then switch to Google Sheets for production without changing code.

\n

Create the Step File

\n

Create a new file at steps/harvest-logbook/log-to-sheets.step.ts.

\n

View the complete Step 5 code on GitHub →

\n

Now let's understand the key parts you'll be implementing:

\n

Input Schema

\n
const inputSchema = z.object({\n  entryId: z.string(),\n  query: z.string(),\n  response: z.string(),\n  sources: z.array(z.string()).optional()\n});\n
\n

The step receives the query, AI response, and optional source citations from Step 3.

\n

Step Configuration

\n
export const config: EventConfig = {\n  type: 'event',\n  name: 'LogToSheets',\n  subscribes: ['log-to-sheets'],\n  emits: [],\n  input: inputSchema\n};\n
\n
    \n
  • subscribes: ['log-to-sheets'] listens for logging events from Step 3
  • \n
  • No emits - this is the end of the workflow
  • \n
\n

Logging Service Selection

\n
const useCSV = process.env.USE_CSV_LOGGER === 'true' || !process.env.GOOGLE_SHEETS_ID;\n\nawait HarvestLogbookService.logToSheets(query, response, sources);\n
\n

The service automatically chooses between CSV and Google Sheets based on environment variables. This keeps the step code simple while supporting different deployment scenarios.

\n

View CSV logger →
\nView Google Sheets service →

\n

Error Handling

\n
try {\n  await HarvestLogbookService.logToSheets(query, response, sources);\n  logger.info(`Successfully logged to ${destination}`);\n} catch (error) {\n  logger.error('Failed to log query response');\n  // Don't throw - logging failures shouldn't break the main flow\n}\n
\n

The step catches logging errors without throwing. This ensures that even if logging fails, the main workflow completes successfully. Users get their query results even if the audit log has issues.

\n

CSV Output Format

\n

The CSV logger saves entries to logs/harvest_logbook.csv with these columns:

\n
    \n
  • Timestamp
  • \n
  • Query
  • \n
  • Response
  • \n
  • Sources (comma-separated)
  • \n
\n

Each entry is automatically escaped to handle quotes and commas in the content.

\n

Test the Step

\n

Step 5 runs automatically after Step 3 completes. To verify it's working:

\n
    \n
  1. Run a query using POST /harvest_logbook/query
  2. \n
  3. Check the Logs tab for LogToSheets entries
  4. \n
  5. Verify the CSV file was created:
  6. \n
\n
   cat logs/harvest_logbook.csv\n
\n

\n

You should see your query and response logged with a timestamp. Each subsequent query appends a new row to the CSV file.

\n

\"image\"

\n

Testing the System

\n

Now that all steps are built, let's test the complete workflow using the Motia Workbench.

\n

Start the Server

\n
npm run dev\n
\n

Open http://localhost:3000 in your browser to access the Workbench.

\n

Test 1: Store Harvest Data

\n
    \n
  1. Select the harvest-logbook flow from the dropdown
  2. \n
  3. Find the POST /harvest_logbook endpoint in the workflow
  4. \n
  5. Click on it to open the request panel
  6. \n
  7. Add the authorization header:
  8. \n
\n
   {\n     \"x-user-id\": \"user_alice\"\n   }\n
\n
    \n
  1. Set the request body:
  2. \n
\n
   {\n     \"content\": \"Harvested 500kg of tomatoes from field A. Weather was sunny, no pest damage observed.\",\n     \"farmId\": \"farm_1\",\n     \"metadata\": {\n       \"field\": \"A\",\n       \"crop\": \"tomatoes\",\n       \"weight_kg\": 500\n     }\n   }\n
\n
    \n
  1. Click Play Button
  2. \n
\n

Watch the workflow execute in real-time. You'll see:

\n
    \n
  • Authorization check passes (user_alice has edit permission)
  • \n
  • Text chunked into embeddings
  • \n
  • Vectors stored in Pinecone
  • \n
  • Success response returned
  • \n
\n

Test 2: Query the Data

\n
    \n
  1. Find the POST /harvest_logbook/query endpoint
  2. \n
  3. Add the authorization header:
  4. \n
\n
   {\n     \"x-user-id\": \"user_alice\"\n   }\n
\n
    \n
  1. Set the request body:
  2. \n
\n
   {\n     \"farmId\": \"farm_1\",\n     \"query\": \"What crops did we harvest recently?\"\n   }\n
\n
    \n
  1. Click Send
  2. \n
\n

Watch the RAG pipeline execute:

\n
    \n
  • Query embedded via OpenAI
  • \n
  • Similar vectors retrieved from Pinecone
  • \n
  • AI generates response with context
  • \n
  • Query and response logged to CSV
  • \n
\n

Test 3: Verify Authorization

\n

Try querying as a user without permission:

\n
    \n
  1. Use the same query endpoint
  2. \n
  3. Change the header:
  4. \n
\n
   {\n     \"x-user-id\": \"user_unauthorized\"\n   }\n
\n
    \n
  1. Click Send
  2. \n
\n

You'll see a 403 Forbidden response to verify if authorization works correctly.\nYou can also create different users with different levels of access and see fine-grained authorization in action.

\n

View the Logs

\n

Check the audit trail:

\n
cat logs/harvest_logbook.csv\n
\n

You'll see all queries and responses logged with timestamps.

\n

The Workbench also provides trace visualization showing exactly how data flows through each step, making debugging straightforward.

\n

Conclusion

\n

You've built a complete RAG system with multi-tenant authorization using Motia's event-driven framework. You learned how to:

\n
    \n
  1. Build event-driven workflows with Motia steps
  2. \n
  3. Implement RAG with text chunking, embeddings, and vector search
  4. \n
  5. Add fine-grained authorization using SpiceDB's relationship model
  6. \n
  7. Handle async operations with event emission
  8. \n
  9. Integrate multiple services (OpenAI, Pinecone, SpiceDB)
  10. \n
\n

Your system now handles:

\n
    \n
  • Semantic search over harvest data with AI-powered embeddings
  • \n
  • Natural language querying with contextually relevant answers
  • \n
  • Multi-tenant access control with role-based permissions
  • \n
  • Event-driven processing for fast API responses
  • \n
  • Audit logging for compliance and debugging
  • \n
  • Flexible LLM options (OpenAI or HuggingFace)
  • \n
\n

Your RAG system is ready to help farmers query their harvest data naturally while keeping data secure with proper authorization.

\n

Final Thoughts

\n

This was a fun exercise in tackling a complex authorization problem and also building something production-grade. I also got to play out some of my Stardew Valley fancies IRL. Maybe it's time I actually move to a cozy farm and grow my own crops (so long as the farm has a good Internet connection!)

\n

\"stardew\"

\n

The repository can be found on the Motia GitHub.

\n

Feel free to reach out to us on LinkedIn or jump into the SpiceDB Discord if you have any questions. Happy farming!

", + "url": "https://authzed.com/blog/building-a-multi-tenant-rag-with-fine-grain-authorization-using-motia-and-spicedb", + "title": "Build a Multi-Tenant RAG with Fine-Grain Authorization using Motia and SpiceDB", + "summary": "Learn how to build a complete retrieval-augmented generation pipeline with multi-tenant authorization using Motia's event-driven framework, OpenAI embeddings, Pinecone vector search, SpiceDB permissions, and natural language querying.", + "image": "https://authzed.com/images/blogs/motia-spicedb.png", + "date_modified": "2025-11-18T22:56:00.000Z", + "date_published": "2025-11-18T17:30:00.000Z", + "author": { + "name": "Sohan Maheshwar", + "url": "https://www.linkedin.com/in/sohanmaheshwar/" + } + }, + { + "id": "https://authzed.com/blog/terraform-and-opentofu-provider-for-authzed-dedicated", + "content_html": "

Today, AuthZed is excited to introduce the Terraform and OpenTofu Provider for AuthZed Dedicated, giving customers a powerful way to manage their authorization infrastructure using industry standard best practices.

\n

With this new provider, teams can define, version, and automate their resources in the AuthZed Cloud Platform - entirely through declarative infrastructure-as-code. This makes it easier than ever to integrate authorization management into existing operational workflows.

\n

Why It Matters

\n

Modern infrastructure teams rely on Terraform and OpenTofu to manage everything from compute resources to networking and identity. With the new AuthZed provider, you can now manage your authorization layer in the same way — improving consistency, reducing manual configuration, and enabling repeatable deployments across environments.

\n

What You Can Manage

\n

The Terraform and OpenTofu provider automates key components of your AuthZed Dedicated environment, including:

\n
    \n
  • Service Accounts - Create and manage programmatic access to your permission systems
  • \n
  • API Tokens - Securely provision and rotate tokens for authentication
  • \n
  • Roles and Policies - Define and apply fine-grained access control
  • \n
  • Permissions System Configuration - Maintain visibility and control over your authorization models
  • \n
\n

And we’re working to support additional resources in AuthZed Dedicated environments, including managing Permissions Systems.

\n

Example Usage

\n

Below is a simple example of how to create a service account using the AuthZed Terraform provider:

\n
provider \"authzed\" {\n  token = var.authzed_token\n}\n\nresource \"authzed_service_account\" \"example\" {\n  name        = \"ci-cd-access\"\n  description = \"Service account for CI/CD pipeline\"\n}\n
\n

This snippet demonstrates how straightforward it is to manage AuthZed resources alongside your existing infrastructure definitions.

\n

Seamless Integration

\n

The introduction of the Terraform and OpenTofu provider makes it effortless to manage authorization infrastructure as code — ensuring your permission systems evolve safely and consistently as your organization scales.

\n

For AuthZed customers interested in using the Terraform and OpenTofu provider, please contact your account manager for access.

\n

To explore the provider and get started, visit the AuthZed Terraform Provider on GitHub.

\n

Not an AuthZed customer, but want to take the technology for a spin? Sign up for AuthZed Cloud today to try it out.

", + "url": "https://authzed.com/blog/terraform-and-opentofu-provider-for-authzed-dedicated", + "title": "Terraform and OpenTofu Provider for AuthZed Dedicated", + "summary": "AuthZed now supports Terraform and OpenTofu. You can manage service accounts, API tokens, roles, and permission system configuration as code, just like your other infrastructure. Define resources declaratively, version them in git, and automate deployments across environments without manual configuration steps.", + "image": "https://authzed.com/images/blogs/opentofu-terraform-blog-image.png", + "date_modified": "2025-10-30T10:40:00.000Z", + "date_published": "2025-10-30T10:40:00.000Z", + "author": { + "name": "Veronica Lopez", + "url": "https://www.linkedin.com/in/veronica-lopez-8ba1b1256/" + } + }, + { + "id": "https://authzed.com/blog/why-were-not-renaming-the-company-authzed-ai", + "content_html": "

It has become popular for companies to align themselves with AI. For good reason! AI has the potential, and ever increasing likelihood, of fundamentally transforming the way that companies work. The hype is out of control! People breathlessly compare AI to the internet and the industrial revolution. And who knows; they could even be right!

\n

At AuthZed, a rapidly growing segment of our customers are AI first companies, including OpenAI. As we work with more AI companies on authorization for AI systems, we often get asked if we will rebrand as an AI company.

\n

Companies have realigned themselves to varying degrees. SalesForce may one day soon be called AgentForce. As an April Fool’s joke, one company started a rumor that Nvidia was going to rebrand as NvidAI, and I think a lot of people probably thought to themselves: “yeah, that tracks.” Mega corps such as Google, Meta, and IBM have .ai top level websites that outline their activities in the AI space.

\n

It can make a lot of sense! After all, unprecedented shifts require unprecedented attention, and a rising tide floats all boats. Well: we’re not. In this post I will lay out some of the pros and cons of going all in on AI branding and alignment, and explain our reasons for keeping our brand in place.

\n

A Rational Choice

\n

When considering such a drastic change, I believe each company is looking at the upsides and downsides of a rebrand given their specific situation (revenue, brand value, momentum, staff, etc.) and making a calculated choice that may only apply in their specific context. So what are some of the upsides and downsides?

\n

\"\"

\n

Risks

\n

The risks that I’ve been able to identify boil down to two areas: brand value and perception. Let’s start with brand value.

\n

Companies spend a lot of time and effort building their brand value. It is an intangible asset for companies that pays dividends in areas such as awareness, customer acquisition costs, and reach, just to name a few. Apple is widely considered to have the most valuable brand in the world, and BrandFinance currently values their brand at $575 billion, with a b. That’s approximately 15% of their $3.7 trillion market cap.

\n

When you rebrand by changing your company’s name, you can put all of that hard work at risk. By changing your name, you need to regain any lost brand mindshare. When you change your web address, you need to re-establish SEO and domain authority that was hard fought and hard won. If Apple rebranded to treefruit.ai (dibs btw) tomorrow, we would expect their sales, mindshare, and even email deliverability to go down.

\n

The second major risk category is around perception. By rebranding around AI you’re signaling a few things to the market. First, you're weighing the upside of being aligned with AI heavily. Second, you signal that you’re willing and able to follow the hype. These factors combined may change the perception of your company to potential buyers: from established, steady, successful, to trendy, fast-moving, up and coming.

\n

On a longer time horizon, we’ve also seen many such trends come and go. Web 1.0, Web 2.0, SoLoMo, Cloud, Crypto, VR/AR, and now AI. In all cases these hype movements have had a massive effect on the way people perceive technology, but they have also become less hyped over time, as a new trend has arrived to supplant them. With AI, I can guarantee that at some point we will achieve an equilibrium where the value prop has been mostly established, and the hype adjusts to fit. Do you want to be saddled with an AI-forward brand when that happens? Will you have been able to ride the wave long and high enough to establish an enduring company that can survive on its own? One of my favorite quotes from Warren Buffet may apply here: “Only when the tide goes out do you discover who's been swimming naked.”

\n

Rewards

\n

There are many upsides that companies can expect to reap as well! Hype is its own form of reality distortion field, and it causes a lot of people to act in ways that they might not have otherwise. FOMO, or fear of missing out, is a well established phenomenon that we can leverage to our benefit. Let’s take a look at who is acting differently in this hype cycle.

\n

Investors. If you are a startup that’s hoping to raise capital, you had better have either: insane fundamentals or an AI story. Carta recently released an analysis on how AI is affecting fundraising, with the TL;DR being that AI companies are absorbing a ton of the money, and that growing round sizes can primarily be attributed to the AI companies that are raising. Counter to all of the hype, user Xodarap over at LessWrong.com has produced an analysis on YC companies post GenAI hitting the scene, that paints a less rosy picture of the outcomes associated with primarily AI-based companies so far. It’s possible (probable?) that we are just too early in the cycle to have identified the clear winners and losers for AI.

\n

Vendors. If partnerships are a big part of your model, there are a lot of dollars floating around for partnerships that revolve around AI. I've had a marketing exec from a vendor tell me straight up: “all of our marketing dollars are earmarked only for AI related initiatives right now.” If you can tell a compelling story here, you will be able to find someone willing to help you amplify it.

\n

Businesses. Last, and certainly not least, businesses are also changing their behavior. If you’re a B2B company, your customers are all figuring out what their AI story is too. That means opportunity. They’re looking for vendors, partners, analysts, really anyone who can help them be successful with AI. Their boss told them: “We need an AI story or we’re going to get our lunch eaten! Make it happen!” So they’re out there trying to make it happen. Unfortunately, a study out of MIT recently proclaimed that “95% of generative AI pilots at companies are failing.”

\n

Mitigating Factors

\n

The world is never quite as cut and dry as we think it might be. The good news is, that you can still reap some of the reward without a full rebrand. At AuthZed, we’ve found that you can still tell your AI story, and court customers who are looking to advance their AI initiatives even if you’re not completely AI-native, or all-aboard the hype train. Unfortunately, I don’t have intuition or data for what the comparative advantage is of a rebrand compared to attempting to make waves under a more neutral brand.

\n

Our Calculus

\n

At AuthZed, our context-specific decision not to rebrand was based primarily on how neutral our solution is. While many companies, both AI and traditional, are having success with using AuthZed to secure RAG pipelines and AI agents, we also serve many customers who want to protect their data from unauthorized access by humans. Or to build that new sharing workflow that is going to unlock new revenue. Or break into the enterprise. Put succinctly: we think we would be doing the world a great disservice if our technology was only being used for AI-adjacent purposes.

\n

The other, less important reason why we’re not rebranding, is that at AuthZed we often take a slightly contrarian or longer view than whatever the current hype cycle might dictate. We try not to cargo-cult our business decisions. Following the pack is almost by definition a median-caliber decision. Median-caliber decisions are likely to sum up to a median company outcome. The median startup outcome is death or an unprofitable exit. At AuthZed, we think that the opportunity that we have to reshape the way that the world thinks about authorization shouldn’t be wasted.

\n

With that said, I’ve been wrong many times in the past. Too many to count even. “Never say never” are words to live by! Hopefully if and when the time comes where our personal calculus shifts in favor of a big rebrand, I can recognize the changing landscape and we can do what’s right for the company. What’s a little egg on your face when you’re on a mission to fix the way that companies across the world do authorization.

", + "url": "https://authzed.com/blog/why-were-not-renaming-the-company-authzed-ai", + "title": "Why we’re not renaming the company AuthZed.ai", + "summary": "Should your company rebrand as an AI company? We decided not to.\nAI companies attract outsized funding and partnership dollars. Yet rebranding means trading established brand value and customer mindshare for alignment with today's hottest trend.\nWe stayed brand-neutral because our authorization solution serves both AI and non-AI companies alike. Limiting ourselves to AI-only would be a disservice to our broader mission and the diverse customers who depend on us.", + "image": "https://authzed.com/images/blogs/authzed-ai-bg.png", + "date_modified": "2025-10-27T11:45:00.000Z", + "date_published": "2025-10-27T11:45:00.000Z", + "author": { + "name": "Jake Moshenko", + "url": "https://www.linkedin.com/in/jacob-moshenko-381161b/" + } + }, + { + "id": "https://authzed.com/blog/authzed-adds-microsoft-azure-support", + "content_html": "

Today, AuthZed is announcing support for Microsoft Azure in AuthZed Dedicated to provide more authorization infrastructure deployment options for customers.\nAuthZed now provides customers the opportunity to choose from all major cloud providers - AWS, Google Cloud and/or Microsoft Azure.

\n

\"Authzed

\n

AuthZed customers can now deploy authorization infrastructure to 23+ Azure regions to support their globally distributed applications.\nThis ensures fast, consistent permission decisions regardless of where your users are located.

\n
\n

\"I have been following the development of SpiceDB and AuthZed on how they are providing authorization infrastructure to companies of all sizes,\" said Lachlan Evenson, Principal PDM Manager, Azure Cloud Native Ecosystem.\n\"It's great to see their support for Microsoft Azure and we look forward to collaborating with AuthZed as they work with more Azure customers moving forward.\"

\n
\n

This launch is the direct result of customer demand. Many teams asked for Azure support, and now they have the ability to deploy authorization infrastructure in the cloud of their choice.

\n

\"AuthZed

\n

What is AuthZed Dedicated?

\n

AuthZed Dedicated is our managed service that provides fully private deployments of our cloud platform in our customer’s provider and regions of choice.\nThis gives users the benefits of a proven, production-ready authorization system—without the burden of building and maintaining it themselves.

\n

Industry leaders such as OpenAI, Workday, and Turo rely on AuthZed Dedicated for their authorization infrastructure:

\n
\n

“We decided to buy instead of build early on.\nThis is an authorization system with established patterns.\nWe didn’t want to reinvent the wheel when we could move fast with a proven solution.”\n— Member of Technical Staff, OpenAI

\n
\n

Get Started

\n

With Azure now available, you can deploy AuthZed Dedicated on the cloud of your choice.\nBook a call with our team to learn how AuthZed can power your authorization infrastructure.

", + "url": "https://authzed.com/blog/authzed-adds-microsoft-azure-support", + "title": "AuthZed Dedicated Now Available on Microsoft Azure", + "summary": "AuthZed now supports Microsoft Azure, giving customers the opportunity to choose from all major cloud providers - AWS, Google Cloud, and Microsoft Azure. Deploy authorization infrastructure to 23+ Azure regions for globally distributed applications.\n", + "image": "https://authzed.com/images/blogs/authzed-azure-support-og.png", + "date_modified": "2025-10-21T16:00:00.000Z", + "date_published": "2025-10-21T16:00:00.000Z", + "author": { + "name": "Jimmy Zelinskie", + "url": "https://twitter.com/jimmyzelinskie" + } + }, + { + "id": "https://authzed.com/blog/extended-t-augment-your-design-craft-with-ai-tools", + "content_html": "
\n

TL;DR
\nAI doesn't replace design judgment. It widens my T-shaped skill set by surfacing on-brand options quickly. It's still on me to uphold craft, taste, and standards for what ships.

\n
\n

Designers on small teams, especially at startups, default to being T-shaped: deep in a core craft and broad enough to support adjacent disciplines. My vertical is brand and visual identity, while my horizontal spans marketing, product, illustration, creative strategy, and execution. Lately, AI tools have pushed that horizontal reach further than the usual constraints allow.

\n

At AuthZed, I use AI to explore ideas that would normally be blocked by time or budget: 3D modeling, character variation, and light manufacturing for physical pieces. The point is not to replace design craft with machine output. It is to expand the number of viable ideas I can evaluate, then curate and polish a final product that meets our design standard.

\n

Exploration vs. curation: what actually changed

\n

Previous tools mostly sped up execution. AI speeds up exploration. When you can generate twenty plausible directions in minutes, the scarce skill is not pushing Bézier handles. It is knowing which direction communicates the right message, and why.

\n

Concrete example: Photoshop made retouching faster, but great photography still depends on eye and intent. Figma made collaboration faster, but good product design still depends on hierarchy, flows, and clarity. AI widens the search field so designers can spend more time on curation instead of setup.

\n
\n

Volume before polish
\nWhile at SVA we focused on volume before refinement. We would thumbnail dozens (sometimes a hundred) poster concepts before committing to one. That practice shaped how I use AI today: explore wide, then curate down to find the right solution. Richard Wilde's program emphasized iterative problem-solving and visual literacy long before today's tools made rapid exploration this easy.

\n
\n

Expanding horizontally with AI without losing the vertical

\n

AI works best when it is constrained by the systems you already trust, whether that is the permission model that controls who can view a file or the rules you enforce when writing code. Clarity is what turns an AI model from a toy into a multiplier. When we developed our mascot, Dibs, I knew we would eventually need dozens of consistent, reference-accurate variations: expressions, poses, environments. Historically, that meant a lot of sketching and cleanup before we could show anything.

\n

With specific instructions and a set of reference illustrations, I can review a new variation every few moments. None of those are final, but they land close while surfacing design choices I might not have explored on my own. I still adjust typography, tweak poses, and rebalance compositions before anything ships, so we stay on brand and accessible.

\n

This mirrors every major tool shift. Photoshop did not replace photographers. Figma did not replace designers. AI does not replace design thinking. It gives you a broader search field so you can make better choices earlier.

\n

\"Dibs

\n

Case study: turning 2D into 3D trophies

\n

For our offsite hackathon, I wanted trophies the team would be proud to earn and motivated to chase next time. Our mascot, Dibs, was the obvious hero. I started with approved 2D art and generated a character turn that covered front, side, back, and top views. From there I used a reconstruction tool (Meshy has been the most reliable lately) to get a starter mesh before moving into Blender for cleanup, posing, and print prep.

\n

\"Mesh

\n

I am not a Blender expert, but I have made a donut or two. With the starting mesh it was straightforward to get a printable file: repair holes, smooth odd vertices, and thicken delicate areas. When I hit something rusty, I leaned on documentation and the right prompts to fill the gaps. Before doing any of that refinement, I printed the raw export on my Bambu Lab P1P in PLA, cleaned up the supports, and dropped the proof on a teammate's desk. We went from concept to a physical artifact in under a day.

\n

We ended up producing twelve trophies printed in PETG with a removable base that hides a pocket for added weight (or whatever ends up in there). I finished them by hand with Rub 'n Buff, a prop-maker staple, to get a patinated metallic look. Once the pipeline was dialed in, I scaled it down for a sleeping Dibs keychain so everyone could bring something home, even if they were not on the podium. Small lift, real morale boost.

\n

\"Prints

\n

\"Dibs

\n

Why this matters for T-shaped designers

\n

When anyone can produce a hundred logos or pose variations, the value as a designer shifts to selection with intent. Brand expertise tells you which pose reads playful versus chaotic, which silhouette will hold up at small sizes, and which material choice survives handling at an event. The models handle brute-force trial. You own the taste, the narrative, and the necessary constraints.

\n

The result is horizontal expansion without vertical compromise. Consistency improves because character work starts from reference-accurate sources instead of ad-hoc one-offs. Physical production becomes realistic because you can iterate virtually before committing to materials and time.

\n

With newer models, I can get much closer to production-ready assets with far less back-and-forth prompting. I render initial concepts, select top options based on color, layout, expression, and composition, then create a small mood board for stakeholders to review before building the final production-ready version. The goal is not to outsource taste. It is to see more viable paths sooner, pick one, and refine by hand so the final assets stay original and on-brand.

\n

The guardrails that help keep quality high

\n
    \n
  • Define what success looks like before you generate anything, and decide what \"done\" means.
  • \n
  • Capture the non-negotiables: character traits, palette, typography, voice.
  • \n
  • Provide references instead of adjectives.
  • \n
  • Call out the exact angles, poses, or compositions you need.
  • \n
  • Keep a human in the loop for selection, edits, and distribution.
  • \n
  • Stay ethical: use your own IP, avoid mimicking living artists, and be transparent about where AI fits.
  • \n
\n

Mini checklist: stretch your own T

\n
    \n
  • Pick one adjacent skill that will unlock an upcoming launch.
  • \n
  • Codify the source material: references, palettes, schema, constraints.
  • \n
  • Pair one AI assist with that project and track what you keep, edit, or reject.
  • \n
  • Close with critique: share the work, gather feedback, and refine the pipeline for next time.
  • \n
\n
\n

Process note: I drafted the outline and core ideas, then used an editor to tighten phrasing and proofread. Same pattern as the rest of my work: widen the search, keep the taste.

\n
\n
\n

FAQs

\n

What is a T-shaped designer?
\nA designer with deep expertise in one area (the vertical) and working knowledge across adjacent disciplines (the horizontal).

\n

How does AI help T-shaped designers?
\nAI quickly generates plausible options so you can evaluate more directions, then apply judgment to pick, refine, and ship the best one.

\n

How do I keep brand consistency with AI images?
\nDefine non-negotiables (proportions, palette, silhouette), use reference images, and keep a human finish pass for polish.

\n

Which tools did you use in this workflow?
\nModel-guided image generation (e.g., Midjourney or a tuned model with references), a 2D-to-3D reconstruction step for a starter mesh (Rodin/Hyper3D or Meshy), Blender for cleanup, and a Bambu Lab P1P to slice G-code and print.

\n
\n", + "url": "https://authzed.com/blog/extended-t-augment-your-design-craft-with-ai-tools", + "title": "Extended T: Augment your design craft with AI tools", + "summary": "How a startup designer makes the T wide, expanding into 3D, rapid iteration, and small-batch production without lowering the quality bar.", + "image": "https://authzed.com/images/blogs/shipping-more-design-work/Blog-Design-Dibs-Trophy@2x.png", + "date_modified": "2025-10-03T16:00:00.000Z", + "date_published": "2025-10-03T16:00:00.000Z", + "author": { + "name": "Corey Thomas", + "url": "https://www.linkedin.com/in/cor3ythomas/" + } + }, + { + "id": "https://authzed.com/blog/introducing-authzeds-mcp-servers", + "content_html": "

We're excited to announce the launch of two new MCP servers that bring SpiceDB resources closer to your AI workflow, making it easier to learn and get started using SpiceDB for your application permissions: the AuthZed MCP Server and the SpiceDB Dev MCP Server.

\n

Two Servers, Complementary Use Cases

\n

The AuthZed MCP Server brings comprehensive documentation and learning resources directly into your AI tools. Whether you're exploring SpiceDB concepts, looking up API references, or searching for schema examples, this server provides instant access to all SpiceDB and AuthZed documentation pages, complete API method definitions, and a curated collection of authorization pattern examples. It's designed to make learning and referencing SpiceDB documentation seamless, right where you're already working.

\n

The SpiceDB Dev MCP Server takes things further by integrating directly into your development workflow. It connects to a sandboxed SpiceDB instance, allowing your AI coding assistant to help you learn and experiment with schema development, relationship testing, and permission checking. Need to validate a schema change? Want to test whether a specific permission check will work? Your AI assistant can now interact with SpiceDB on your behalf, making development faster and more intuitive.

\n

Ready to try them out? Head over to authzed.com/docs/mcp to get started with both servers.

\n

\"\"

\n

Our MCP Journey: From Prototypes to Production

\n

We've been experimenting with MCP since the first specification was published. Back when the term \"vibe coding\" was just starting to circulate, we built an early prototype MCP server for SpiceDB. The results were eye-opening. We were pleasantly surprised by how effectively LLMs could use the tools we provided, and delighted by the potential of being able to \"talk\" to SpiceDB through natural language.

\n

That initial prototype sparked conversations across the SpiceDB community. We connected with others who were equally excited about the possibilities, sharing ideas and exploring use cases together. Those early discussions helped shape our thinking about what MCP servers for SpiceDB could become.

\n

As the MCP specification continued evolving (particularly around enterprise readiness and authorization), we wanted to deeply understand these new capabilities. This led us to build a reference implementation of a remote MCP server using open source solutions. That reference implementation became a testbed for understanding the authorization aspects of the spec and exploring best practices for building production-ready MCP servers.

\n

Why We Built These Servers

\n

Through our own experience with AI coding tools, we've seen firsthand how valuable it is to have the right resources and tools available directly in your AI workflow. Our team's usage of AI assistants has steadily increased, and we know the difference it makes when information and capabilities are just a prompt away.

\n

For AuthZed and SpiceDB users, we wanted to bring learning and development resources closer to where you're already working. Whether you're learning SpiceDB concepts, building a new schema, or debugging permissions logic, having immediate access to documentation, examples, and a sandbox SpiceDB instance can dramatically speed up the development process.

\n

That's why we built both servers: the AuthZed MCP Server puts knowledge at your fingertips, while the SpiceDB Dev MCP Server puts your development environment directly into your AI assistant's toolkit.

\n

Building Responsibly: Authorization in MCP

\n

We're still actively building and experimenting with MCP. While the specification provides guidance for authorization, there's significant responsibility on MCP server developers to implement appropriate access controls for resources and accurate permissions around tools.

\n

This is particularly important as MCP servers become more powerful and gain access to sensitive systems. We're learning as we build, and we'll be sharing new tools and lessons around building authorization into MCP servers as we discover them. We believe the combination of SpiceDB for MCP permissions and AuthZed for authorization infrastructure is especially well-suited for defining and enforcing the complex permissions that enterprise MCP servers require.

\n

In the meantime, we encourage you to try out our MCP servers. The documentation for each includes detailed use cases and security guidelines to help you use them safely and effectively.

\n

If you're building an enterprise MCP server and would like help integrating permissions and authorization, we'd love to chat. Book a call with our team and let's explore how we can help.

\n
\n

Happy coding, and we can't wait to see what you build with these new tools! 🚀

", + "url": "https://authzed.com/blog/introducing-authzeds-mcp-servers", + "title": "Introducing AuthZed's MCP Servers", + "summary": "We're launching two MCP servers to bring SpiceDB closer to your AI workflow. The AuthZed MCP Server provides instant access to documentation and examples, while the SpiceDB Dev MCP Server integrates with your development environment. Learn about our MCP journey from early prototypes to production, and discover how these tools can speed up your SpiceDB development.", + "image": "https://authzed.com/images/upload/chat-with-authzed-mcp.png", + "date_modified": "2025-09-30T10:45:00.000Z", + "date_published": "2025-09-30T10:45:00.000Z", + "author": { + "name": "Sam Kim", + "url": "https://github.com/samkim" + } + }, + { + "id": "https://authzed.com/blog/the-dual-write-problem-in-spicedb-a-deep-dive-from-google-and-canva-experience", + "content_html": "

This talk was part of the Authorization Infrastructure event hosted by AuthZed on August 20, 2025.

\n

How We Are Solving the Dual Write Problem at Canva

\n

In this technical deep-dive, Canva software engineer Artie Shevchenko draws on five years of experience with centralized authorization systems—first with Google's Zanzibar and now with SpiceDB—to tackle one of the most challenging aspects of authorization system implementation: the dual-write problem.

\n

The dual-write problem emerges when data must be replicated between your main database (like Postgres or Spanner) and SpiceDB, creating potential inconsistencies due to network failures, race conditions, and system bugs. These inconsistencies can lead to false negatives (blocking legitimate access) or false positives (security vulnerabilities).

\n

However, as Shevchenko explains, \"the good news is centralized authorization systems, they actually do simplify things quite a bit.\" Unlike traditional event-driven architectures where teams publish events hoping others interpret them correctly, \"with SpiceDB, you're fully in control\" of the entire replication process.

\n

SpiceDB offers several key advantages: \"you're not replicating aggregates. Most often, it's simple booleans or relationships,\" making inconsistencies easier to reason about. Additionally, \"the volume of replication is also much smaller\" since authorization data can live primarily in SpiceDB, and you're \"replicating just to SpiceDB, not to 10 other services.\"

\n

The talk explores four solution approaches—from cron sync jobs to transactional outboxes—with real-world examples from Google and Canva. Shevchenko's key insight: \"dual write is not a SpiceDB problem. It's a data replication problem,\" but \"SpiceDB makes the dual write problem, and ultimately the data integrity problem, much more manageable.\"

\n

On Ownership and Control

\n
\n

\"First of all, as a team now, you own the whole replication process. Because you own both copies of the data. Which makes a huge difference. You're not just publishing an event that other teams would hopefully correctly interpret and apply to their data stores.\"

\n
\n

Takeaway: SpiceDB gives you complete control over your authorization data replication, eliminating dependencies on other teams and reducing coordination overhead.

\n

On Proven Scale

\n
\n

\"And then feed it as an input to our MapReduce style sync job, which would sync data for 100 millions of users in just a couple of hours.\"

\n
\n

Takeaway: SpiceDB's approach has been battle-tested at Google scale, handling hundreds of millions of users efficiently.

\n

On Technical Advantages

\n
\n

\"But, the first three approaches without Zanzibar or SpiceDB would be really tricky, if not impossible. Not only because of the data ownership problem, but also because of aggregates. With event-driven replication, you're probably not replicating simple atomic facts.\"

\n
\n

Takeaway: SpiceDB's simple data model (booleans and relationships) makes dual-write problems significantly more manageable compared to traditional event-driven architectures that deal with complex aggregates.

\n

\n
\n

Full Transcript

\n

Talk by Artie Shevchenko, Software Engineer at Canva

\n

Introduction

\n

All right, let's talk about the dual-write problem. My name is Artie Shevchenko, and I'm a software engineer at Canva. My first experience with systems like SpiceDB was actually with Zanzibar at Google in 2017. And now I'm working on SpiceDB integration at Canva. So, yeah, almost five years working with this piece of tech.

\n

Why SpiceDB Simplifies Authorization

\n

And from my experience, there are two hard things in centralized authorization systems. It's dual-writes and data backfills. But neither of them is unique to Zanzibar or SpiceDB. In fact, dual-write is a fairly standard problem. And when we're talking about replication to another database, it is always challenging. Whether it's a permanent replication of some data to another microservice, or migration to a new database with zero downtime, or even replication to SpiceDB.

\n

The good news is centralized authorization systems, they actually do simplify things quite a bit. First of all, as a team now, you own the whole replication process. Because you own both copies of the data. Which makes a huge difference. You're not just publishing an event that other teams would hopefully correctly interpret and apply to their data stores. With SpiceDB, you're fully in control.

\n

Secondly, with SpiceDB, you're not replicating aggregates. Most often, it's simple booleans or relationships. Which makes it much easier to reason about the possible inconsistencies.

\n

And finally, the volume of replication is also much smaller. For two reasons. First, most of the authorization data you can store in SpiceDB only, once the migration is done. And second, with SpiceDB, you need to replicate just to SpiceDB, not to 10 other services. Well, there are also search indexes, but they're very special for multiple reasons. And the good news is search indexes, you don't need to solve them on the client side. Mostly, you can just delegate this to tools that materialize.

\n

But that said, even with replication to SpiceDB, there is a lot of essential complexity there that first, you need to understand. And second, you need to decide which approach you're going to use to solve the dual-write problem.

\n

Talk Structure and Definitions

\n

The structure of this talk, unlike the topic itself, is super simple. I don't have any ambition to make the dual-write problem look simple. It's not. But I do hope to make it clear. So, the goal of this talk is to make the problems and the underlying causes clear. And we're going to spend quite a lot of time unpacking what are the practical problems we're solving. And then, talking about the solution space, the goal is to make it clear what works and what doesn't. And, of course, the pros and cons of the different alternatives.

\n

But let's start with a couple of definitions. Almost obvious definitions aside, let's take a look at the left side of the slide, at the diagrams. Throughout the talk, we'll be looking into storing the same piece of data in two databases. Of course, ideally, you would store it in exactly one of them. But in practice, unfortunately, it's not always possible, even with SpiceDB.

\n

So, when information in one database does not match the information in another database, we'll call it a discrepancy or inconsistency. Or I'll simply say that databases are out of sync.

\n

When talking about the dual-write problem in general, I'll be using the term \"source of truth\" for the database that is kind of primary in the replication process. And the second database I'll call the second database. I was thinking about calling them primary and replica or maybe master and slave. But the problem is, these terms are typically used to describe replication within the same system. But I want to emphasize that these are different databases. And also, the same piece of knowledge may take very different forms in them. So, I'll stick to the terms \"source of truth\" and just some other second database. That's when I talk about the dual-write problem in general.

\n

But not to be too abstract, we'll be mostly looking at the dual-write problem in the context of data replication to SpiceDB, not just to some other abstract second database. And in this case, instead of using the term \"source of truth,\" I'll be using the term \"main database,\" referring to the traditional transactional database where you store most of your data, like Postgres, Dynamo, or Spanner. Because for the purposes of this talk, we'll assume that the main database is a source of truth for any replicated piece of data. Yes, theoretically, replicating in the other direction is also an option, but we won't consider that. We're replicating from the main database to SpiceDB.

\n

So, in different contexts, I'll refer to the database on the left side of this giant white replication arrow as either \"source of truth\" or \"main database\" or, even more specifically, Postgres or Spanner. Please keep this in mind.

\n

And finally, don't get confused when I call SpiceDB a database. Maybe I can blame the name. Of course, it's more than just a database. It is a centralized authorization system. But in this talk, we actually care about the underlying database only. So, hopefully, that doesn't cause any confusion.

\n

Defining the Dual-Write Problem

\n

All right. We're done with these primitive definitions. Now, let's define what the dual-write problem is. And let's start with an oversimplified but real example from home automation.

\n

Let's say there are two types of resources, homes and devices. Users can be members of multiple homes, and they have access to all the devices in their homes. So, whether a device is in one home or another, that information obviously has to be stored both in the main database, in this case, Spanner, and in SpiceDB.

\n

And if you want to move a device from one home to another, now you need to update the device's home in both databases. If you get a task to implement that, you would probably start with these two lines of code. You first write to the source of truth, which is Spanner, and then write to the second database, which is SpiceDB. The problem is you cannot write to both data stores in the same transaction, because these are literally different systems.

\n

So, a bunch of things can go wrong. If the first write fails, it's easy. You just let the error propagate to the client, and they can retry. But what about the second write? What if that one fails? Do you try to revert the first write and return an error to the client? But what if reverting the first one fails? It's getting complicated.

\n

Another idea. Maybe open a Spanner transaction and write to SpiceDB with the Spanner transaction open. I won't spend time on exploring this option, but it also doesn't solve anything, and in fact, just makes things worse. The truth is, none of the obvious workarounds actually make things better.

\n

So, we'll use these two simple lines of code as a starting point, and just acknowledge that there is a problem for us to solve there. The second write may fail for different reasons. It's either because of a network problem, or a problem with SpiceDB, or even the machine itself terminating after the first line. In all of these scenarios, the two databases become out of sync with each other. One of them will think that the device is in Home 1, and another will think that it is in Home 2.

\n

Data Integrity Problems: False Negatives and False Positives

\n

The second write failing can create two types of data integrity problems. It's either SpiceDB is too restrictive. It doesn't allow access to someone who should have access, which is called a false negative on the slides. Or the opposite. SpiceDB can be too permissive, allowing access to someone who shouldn't have access. False negatives are more visible. It's more likely you would get a bug report for it from a customer. But false positives are actually more dangerous, because that's potentially a security issue.

\n

We've already tried several obvious workarounds, and none of them worked. But let's give it one last shot, given that it is false positives that are the main issue here. Maybe there is a simple way to get rid of those. Let's try a special write operations ordering. Namely, let's do SpiceDB deletes first. Then, in the same transaction, make all the changes to the main database. And then, do SpiceDB upserts.

\n

So, in our example, the device is first removed from home 1 in SpiceDB. And then, after the Spanner write, the device is added to home 2 in SpiceDB. And it actually does the trick. And it's easy to prove that it works not only in this example, but in general. If there are no negations in the schema, such an ordering of writes ensures no false positives from SpiceDB. So, now the dual write problem looks like this. Much better, isn't it? No security issues anymore.

\n

Let me play devil's advocate here. If the second or the third write fails, let's say, 100 times per month, we would probably hear from nobody. Or maybe one user. But for one user, can you fix it manually? But aren't we missing something here?

\n

The Race Condition Problem

\n

The problem is, there is a whole class of issues we've ignored so far. It's race conditions. In this scenario from the slide, we're doing writes in the order that was supposed to totally eliminate the false positives. But as a result of these two requests from Alice and Bob, we get a false positive for Tom. That's because we're no longer talking about failing writes. None of the writes failed in this scenario. It is race conditions that caused the data integrity problem here.

\n

So, we have identified two causes or two sources of discrepancies between the two databases. The first is failing writes. And the second is race conditions. So, unfortunately, yet another workaround doesn't really make much difference. Back to our initial simple starting point. Two consecutive writes. First write to the main database. And then write to SpiceDB. Probably in a try-catch like here.

\n

And one last note looking at this diagram. Often people think about the dual write problem very simplistically. They think if they can make all the writes eventually succeed, that would solve the problem for them. So, all they need is a transactional outbox or a CDC, change data capture, or something like this. But that's not exactly the case. Because at the very least, there are also race conditions. And as we'll see very soon, it's even more than that.

\n

Adding Backfill Complexity

\n

And now, let's add backfill to the picture. If you're introducing a new field, a new type of information that you want to be present in multiple databases, you just make the schema changes, implement the dual write logic, and that's it. You can immediately start reading from the new field or a new column in all the databases. But if it's not a new type of information, if there is pre-existing data, then the data needs to be backfilled.

\n

Then the new column, field, or relation goes through these three phases. You can say there is a lifecycle. First, the schema definition changes. New column is created or something like this. Then, dual write is enabled. And finally, we do a backfill, which iterates through all of the existing data and writes it to the second database. And once the backfill is done, the data in the second database is ready to use. It's ready for reads and ready for access checks if we're talking about SpiceDB.

\n

And as it's easy to see from the backfill pseudocode, backfill also contributes to race conditions. Simply because the data may change between the read and write operations. And again, welcome false positives.

\n

Okay. So far, we've done two things. We've defined the problem. And we've examined multiple tempting workarounds just to find that they don't really solve anything. Now, let's take a look at several approaches used at Google and Canva that actually do work. And, of course, discuss their trade-offs.

\n

Solution Approaches

\n

Approach 1: Cron Sync Jobs (Google's Solution)

\n

First of all, doing nothing about it is probably not a good idea in most cases. Because authorization data integrity is really important. It's not only false negatives. It is false positives as well, which, as you remember, can be a security issue. The good news is there are multiple options to choose from if you want to solve the dual-write problem.

\n

And let's start with a solution we used in our team at Google, which is pretty simple. We just had a cron sync job. That job would run several times per day and fix all the discrepancies between our Spanner instance and Zanzibar. Looking at the code on the right side, because of the sync job, we can keep the dual-write code itself very, very simple. It's basically the two lines of code we started with.

\n

Sync jobs at Google are super common. And what made it even easier for us here is consistent snapshots. We could literally have a snapshot of both Spanner and Zanzibar for exactly the same instant. And then feed it as an input to our MapReduce style sync job, which would sync data for 100 millions of users in just a couple of hours.

\n

And interestingly, sync jobs are the only solution that truly guarantees eventual consistency, no matter what. Because in addition to write failures and races, there is also a third problem here. It is bugs in the data replication logic.

\n

Now, the most interesting part is how did it perform in practice? And thanks to our sync job, we actually know for sure how did it go. Visibility into the data integrity is a huge, huge benefit. We not only knew that all the discrepancies get fixed within several hours, but we also knew how many of them we actually had. And interestingly, the number of discrepancies was really high only when we had bugs in our replication logic. Race conditions and failed writes, they did cause some inconsistencies too. But even at our scale, there were a small number of them, typically tens or hundreds per day.

\n

Now, talking about the downsides of this approach, there are two main downsides. The first one is there are always some transient discrepancies, which can be there for several hours. Because we're not trying to address race conditions or failing writes in real time. And the second problem is infra costs. Running a sync job for a large database almost continuously is really, really expensive.

\n

Transactional Outbox Pattern Foundation

\n

All right. We're done with the sync jobs. Now, all the other approaches we'll be looking at, they leverage the transactional outbox pattern. For some of those approaches, you could achieve similar results with CDC, change data capture, instead of the outbox. But outbox is more flexible, so we'll stick to it.

\n

And at its core, the transactional outbox pattern is really, really simple. When writing changes to the main database, in the same transaction, we also store a message saying, \"please write something to SpiceDB.\" And unlike traditional message queues outside of the main database, such an approach truly guarantees for us at-least-once delivery. And then there is a worker running continuously that pulls the messages from the outbox and acts upon them, makes the SpiceDB writes. Note that I mentioned a Zedtoken here in the code, but these are orthogonal to our topics, so I'll just skip them on the next slides.

\n

As I already mentioned, the problem the transactional outbox solves for us is reliable message delivery. Once SpiceDB and the network are in a healthy state, all the valid SpiceDB writes will eventually succeed. One less problem for us to worry about. But similar to CDC, it doesn't solve any of the other problems. It obviously doesn't provide any safety nets for the bugs in the data replication logic. And as it's easy to see from these examples, the transactional outbox is also subject to race conditions. Unless there are some extra properties guaranteed, which we'll talk very, very soon about.

\n

Okay. Now that we've set the stage with transactional outboxes, let's take a look at several solutions. The second approach to solving the dual-write problem is what I would call micro-syncs. Not sure if there's a proper term for it, but let me explain what I mean. In many ways, it's very similar to the first approach, cron sync jobs. But instead of doing a sync for the whole databases, we would be doing targeted syncs for specific relationships only.

\n

For example, if Bob's role in Team X changed, we would completely resync Bob's membership in that team, including all his roles. So in the worker, we would pull the message from the outbox, then read the data from both databases, and fix it in SpiceDB if there are any discrepancies.

\n

To make it scale, instead of writing it to SpiceDB from the worker directly, we can pull those messages in batches and just put them into another durable queue, for example, into Amazon SQS. And then we can have as many workers as we need to process those messages.

\n

But aren't these micro-syncs subject to races themselves? They are. Here on this diagram, you can see an example of such a race condition creating a discrepancy. But adding just a several-seconds delay makes such races highly unlikely. And for our own peace of mind, we can even process the same message again, let's say in one hour. Then races become practically impossible. I mean, yes, in theory, the internet is a weird thing that doesn't make any guarantees. But in practice, even TCP retransmissions, they won't take an hour.

\n

So the race conditions are solved with significantly delayed micro-syncs. And you can even do multiple syncs for the same message with different delays.

\n

Now, what about bugs in the data replication logic? And in practice, that's the only difference with the first approach, is that micro-syncs, they do not cover some types of bugs. Specifically, let's say you're introducing a new flow that modifies the source of truth, but then you simply forget to update SpiceDB in that particular flow. Obviously, if there is no message sent, there is no micro-sync, and there would be a discrepancy. But apart from that, there are no other substantial downsides in micro-syncs. They provide you with almost the same set of benefits as normal sync jobs, and even fix discrepancies on average much, much faster, which is pretty exciting.

\n

And finally, let's take a look at a couple of options that do not rely on syncs between the databases. Let's introduce a version field for each replicated field. In our home automation example, it would be a home version column in the devices table, and a corresponding home version relation in the SpiceDB device definition. And then we must ensure that each write to the home ID field in Spanner increments the device home version value. And then in the message itself, we also provide this new version value so that when the worker writes to SpiceDB, it can do a conditional write to make sure it doesn't override a newer home value with an older one.

\n

And there are different options for how to implement this. But none of them are really simple. So introducing a bug in the replication logic, honestly, is pretty easy. And the worst thing is, unlike sync jobs or even micro-syncs, this approach doesn't provide you with any safety nets. When you introduce a bug, it won't even make it visible. So yeah, that's the three downsides of this approach. It's complexity, no visibility into the replication consistency, and no safety nets. And the main benefit is, it does guarantee there would be no inconsistencies from race conditions or failed writes.

\n

And the last option is here more for completeness. To explore the idea that lies on the surface and, in fact, almost works, but there are a lot of nuances, limitations, and pitfalls to avoid there. And that's the only option where we solve the dual write problem by actually abandoning the dual write logic. So let's say we have a transactional outbox. And the only thing the service code does, it writes to the main database and the transactional outbox. No SpiceDB writes there. So there is no dual write.

\n

And there is just a single worker that processes a single message at a time, the oldest message available in the transactional outbox, and then it attempts to make a SpiceDB write until it succeeds. So the transactional outbox is basically a queue. And that by itself guarantees eventual consistency. I'll give you some time to digest this statement.

\n

You can prove that as long as there are no bugs, the transactional outbox is a queue, and there is a single consumer, eventual consistency between the main database and SpiceDB is guaranteed. Because it's FIFO, first in, first out, and there are no SpiceDB writes from service code.

\n

However, a single worker processing one message at a time from a queue wouldn't provide us with a high throughput. So you might be tempted to, instead of writing to SpiceDB directly from the worker, to put it into another durable queue. But I'm sure you can see the problem with this change, right? We've lost the FIFO property. So now it's subject to races. Unless that second queue is FIFO as well, of course. But if it's FIFO, guess what? We're not increasing throughput.

\n

So yeah, if we're relying on the FIFO property to address race conditions, there is literally no reason to transfer messages into another durable queue. If you want to increase the throughput, just use bulk SpiceDB writes]. But you would need to preprocess them to make sure there are no conflicts within the same batch. Yes, there is no horizontal scalability, but maybe that's not a problem for you.

\n

Yet, what would probably be a problem for most use cases is that a single problematic write can stop the whole replication process. And once we actually experienced exactly this issue, a single malformed SpiceDB write halting the whole replication process for us. And that's pretty annoying, as it requires manual intervention and is pretty urgent.

\n

And yet another class of race conditions is introduced by backfills. Because FIFO is a property of the transactional outbox. But backfill writes, fundamentally, they do not go through the outbox. So, yeah. While it's possible to introduce a delay to the transactional outbox specifically for the backfill phase, to address it, I would say the overall amount of problems with this approach is already pretty catastrophic.

\n

So, let's do a quick summary. We've explored four different approaches to solving the dual write problem. And here is a trade-off table with the pros and cons of each of them. The obvious loser is the last FIFO transactional outbox option. And probably conditional writes with the version field are not the most attractive solution as well. Mostly because of their complexity and lack of visibility into the replication consistency.

\n

So, the two options we're probably choosing from are the first and the second one. It's two types of syncs. Either a classic cron sync job or micro syncs. And, yeah. You can totally combine most of these approaches with each other if you want.

\n

We're almost done. I just wanted to reiterate the fact that dual write is not a SpiceDB problem. It's a data replication problem. So, let's say you're doing event-driven replication. Strictly speaking, there are no dual writes, same as in the last FIFO option. But, ultimately, there are two writes to two different systems, to two different databases. So, we're facing exactly the same set of problems.

\n

Adding a transactional outbox can kind of ensure that all the valid writes will eventually succeed. But, probably only if you own the other end of the replication process. Then, you can also add the FIFO property to address race conditions, which is option four. But, the first three approaches without Zanzibar or SpiceDB would be really tricky, if not impossible. Not only because of the data ownership problem, but also because of aggregates. With event-driven replication, you're probably not replicating simple atomic facts.

\n

So, yeah. SpiceDB makes the dual write problem, and ultimately the data integrity problem, much more manageable.

\n

And that's it. Hopefully, this presentation brought some clarity into the highly complex dual write problem.

", + "url": "https://authzed.com/blog/the-dual-write-problem-in-spicedb-a-deep-dive-from-google-and-canva-experience", + "title": "The Dual-Write Problem in SpiceDB: A Deep Dive from Google and Canva Experience", + "summary": "In this technical deep-dive, Canva software engineer Artie Shevchenko draws on five years of experience with centralized authorization systems, first with Google's Zanzibar and now with SpiceDB, to tackle one of the most challenging aspects of authorization system implementation: the dual-write problem. This talk was part of the Authorization Infrastructure event hosted by AuthZed on August 20, 2025.", + "image": "https://authzed.com/images/blogs/a5-recap-canva.png", + "date_modified": "2025-09-16T08:00:00.000Z", + "date_published": "2025-09-16T08:00:00.000Z", + "author": { + "name": "Artie Shevchenko", + "url": "https://au.linkedin.com/in/artie-shevchenko-67845a4b" + } + }, + { + "id": "https://authzed.com/blog/turos-spicedb-success-story-how-the-leading-car-sharing-platform-transformed-authorization", + "content_html": "

This talk was part of the Authorization Infrastructure event hosted by AuthZed on August 20, 2025.

\n

Hosting Teams: A Case for SpiceDB

\n

Andre, a software engineer at Turo, shared how the world's leading car-sharing platform solved critical security and scalability challenges by implementing SpiceDB with managed hosting from AuthZed Dedicated. Faced with fleet owners having to share passwords due to rigid ownership-based permissions, Turo built a relationship-based authorization system enabling fine-grained, team-based access control. The results speak for themselves: \"SpiceDB made it trivial to design and implement the solution compared to traditional relational databases\" while delivering \"much higher performance and throughput.\" The system proved remarkably adaptable—adding support for inactive team members required \"literally one single line of code\" to change in the schema. AuthZed's managed hosting proved equally impressive, with only one incident in over two years of production use. As Andre noted, \"ultimately hosting with AuthZed saved us money in the long run\" by eliminating the need for dedicated infrastructure engineering, allowing Turo to focus on their core business while maintaining a \"blistering fast\" authorization system.

\n

On Reliability and Expert Support:

\n
\n

\"In over two years [...] of operations in production, we had a single incident. And even then in that event, they demonstrated the capacity to recover from faults very, very quickly.\"

\n
\n

On Business Focus:

\n
\n

\"For over two years, Turo has used AuthZed's [Dedicated] offering where they're responsible for deploying and maintaining all the infrastructure required by the SpiceDB clusters. And that gives us time back to focus on growing our business, which is our primary concern.\"

\n
\n

\n
\n

Full Transcript

\n

Talk by Andre Sanches, Software Engineer at Turo

\n

Hello, everyone, and welcome. I'm Andre, a software engineer at Turo, working with SpiceDB for just over two years now. I'm here to share a bit of our experience with SpiceDB as a product and AuthZed as a hosting partner. Congratulations, by the way, to AuthZed for its five-year anniversary. It's a privilege to be celebrating this milestone together. So let's get started.

\n

Introduction to Turo

\n

First, a quick introduction to those who don't know Turo. We're the leading car-sharing platform in the world, operating in most of the US and four other countries. Our mission is to put the world's 1.5 billion cars to better use. Our business model is similar to popular home-sharing platforms you may be familiar with, with a fundamental difference. Vehicles are less expensive compared to homes, so it's common that hosts build up fleets of vehicles on Turo. In fact, many of our hosts build successful businesses with our help, and therein lies a challenge that we solved with SpiceDB.

\n

The Challenge

\n

Hosts have responsibilities, such as communicating with guests in a timely manner, taking pictures of vehicles prior to handoff, and again, upon return of the vehicle to resolve disputes that may happen, managing vehicle schedules, etc. These things take time and effort, and as you scale up your business, fleet owners often hire people to help. And the problem is, in the past, Turo had a flat, ownership-based permission model. You could only interact with the vehicles you own, so hosts had no other choice but to share their accounts and their passwords. It's safe to say that folks in the target audience of this event understand how big of a problem that can be.

\n

Moreover, third-party companies started sprouting all over the place to bridge that gap, to manage teams by way of calling our backend, which adds yet another potential attack vector by accessing Turo's customer data. So, it had become a large enough risk and a feature gap that we set out to solve that problem.

\n

The Solution

\n

The solution was to augment the flat, ownership-based model with a team-based approach, where admin hosts, meaning the fleet owner, can create teams that authorize individual drivers to perform specific actions, really fine-grained, on one or more of the vehicles that they own. Members are invited to join teams via email, which gives them the opportunity to sign up for a Turo account if they don't yet have one.

\n

So, the solution from a technical standpoint is a graph-based solution that enables our backend to determine very quickly, can Driver ABC perform a certain action on vehicle XYZ? In this case right here, can Driver ABC communicate with guests that booked that certain vehicle? SpiceDB made it trivial to design and implement the solution compared to traditional relational databases, which is most of our backend. Moreover, it offloaded our monolithic database with a tool that offers much higher performance and throughput.

\n

Implementation Details

\n

Anecdotally, the simplicity of SpiceDB helped implement a last-minute requirement that crept in late in the development cycle—support for inactive team members, the ones who are pending invitation acceptance. Prior to that, the invitation system was purely controlled in MySQL. And we realized, you know what, if we're storing the team in SpiceDB, why not make it so that we can store inactive users too? And the reason I'm mentioning this is this impressed everybody who was working on that feature at the time, because it was literally one single line of code that we had to change in the schema to enable this.

\n

So I'll talk more about this in a second where I show some technical things. But the graph that I just mentioned then roughly translates to this schema. So this is a simplified but still accurate rendition of what our SpiceDB schema looks like. Hopefully this clarifies how driver membership propagates to permissions on vehicles, if you're familiar with SpiceDB schemas.

\n

Some noteworthy mentions here are self-referencing relations, this one up here, or all the way up there. So basically, this is how we implemented the inactive users. If you notice that there, there's the member role and then an active member role. And by way of adding a single record that connects the member role with an active member role in the hosting team, you can enable and disable drivers. So this was so incredibly impressive at the time, because we thought we're going to have to change the entire schema and a whole bunch of other changes. And no, that's all it took.

\n

And again, it's one of those things that once it clicked, if you're familiar with the SpiceDB but not with the self-referencing relation, looking at this, that #member role and pointing to a relation in the same definition, it kind of looks a little daunting. It did to me. I don't know—you're probably smarter than I am, but it was daunting. But then one day it just clicked and I'm like, hmm, okay, that's how it is. And I was super stoked to continue working with SpiceDB and I'm going to implement more and more of the features. And help the feature team, actually, because it was a separate feature team that was working on this. So that self-referencing was interesting.

\n

Namespacing Feature

\n

The other noteworthy mention here is the same namespaces. If you notice in front of the definition, there's a hosting teams forward slash. This is how we separate the schema into multiple copies of the same schema in the same cluster. So we have an ephemeral test environment in which we create and destroy on command sandbox replicas of our entire backend system. This enables us to deploy dozens, if not hundreds, of isolated copies of the schema, along with everything else in our backend, to test new features in a controlled environment that we can break, that we can modify as we see fit without affecting customers. And the namespacing feature in SpiceDB allowed us to use the same cluster for all those copies and save us some money. So we don't have to stand up a new server. We, you know, there's no computational costs or delays or any of that in provisioning computing resources and this and that.

\n

So the feature was released the week of, you know, us going pre-live, in a test environment. And we were probably the first adopters of this and it was really cool.

\n

Performance

\n

So let me see at a high level, this is how our hosting team feature works. You can see, let me use the mouse here. You can see how permissions propagate to teams. So, team pricing and availability goes to the relation of the team in the hosting team. Hosting team has the pricing and availability for active member roles or admin role. Plus sign, as you all know, is a or, and then it connects to the driver. Simple, fast. This is blistering fast.

\n

One other query that we make to SpiceDB very, very often—matter of fact, this is the single most, you know, issued query to SpiceDB at any given time—is, is the currently logged in user a cohost. And that's done for everybody. Even if you're not a cohost, this is how we determine whether you're a cohost or not. That will then drive UI, you know, decisions, what, what widgets to show. You know, only if you're, if it's pertinent to you, if you're a cohost, if not, then there's no, no reason to. To pollute the UI with, you know, cohosting features. Yeah.

\n

User Interface

\n

And this is what the UI looks like. So, you, on a team, you have cohosts and you can add or invite, here's an interesting thing. The code name of the project was cohosting. It ended up being hosting teams because we then used the nomenclature cohosts to add people to teams. So, here you have your cohosts. You can invite them by email. They get an, an email that points them to sign up to Turo. If they already have an account, they can just log in. And the moment they log in, it automatically accepts the invitation.

\n

Next you have the fine grain permissions of what your group can, or your team can do. In this case, we have trip management enabled. This is the base actually, you know, the base permission that you have to grant to everybody on the team. And then there's pricing and availability that allows you to set prices for vehicles, discounts, you know, see finances and all that stuff. So you can imagine why that's, you know, why it's very nice to be able to toggle this and not let, you know, just any cohost that has no business looking at your finances, you know, just hiding it from them by way of untoggling the permission here. And then you have your vehicles. The list shows all the vehicles you own. You just toggle the ones you want, save, and you're off to the races. Your hosting team is in place and working.

\n

AuthZed Hosting Partnership

\n

So also that as a hosting partner, when you're considering using, you know, a big challenge of adopting a new system is setting it up and running it in a scalable and reliable way. You have to manage, you know, security issues. You have to manage our scaling. You have to manage all kinds of, you know, infrastructure challenges. And that costs money. In this day and age, it's really hard to find engineers who understand infrastructure well enough to manage all the moving parts of a highly scalable system such as SpiceDB.

\n

For over two years, Turo has used AuthZed's fully hosted cloud offering where they're responsible for deploying and maintaining all the infrastructure required by the SpiceDB clusters. And that gives us time back to focus on growing our business, which is our primary concern. So this is a great opportunity actually to give AuthZed a shout out for their excellent reliability.

\n

In over two years, over two years and three months now, actually of operations in production, we had a single incident. And even then in that event, they demonstrated the capacity to recover from faults very, very quickly to pinpoint the problem incredibly quickly. And, you know, take care of it. I think the outage was, we were out for like 38 minutes, something like that. It was, you know, we've had other partners that things were much, much more challenging. So, and once in two years, the root cause, the entire handling of the outage was very, very, you know, nice to see. Because it involved thorough analysis, post-mortems and making sure that it doesn't happen again, putting in safeguards to ensure that it doesn't happen again.

\n

So everything was, you know, systems fail. We understand that. And how we deal with it is how, is what shows how, you know, how good you are. And with AuthZed, we rest, you know, easy knowing that we're well taken care of. And ultimately hosting with AuthZed saved us money in the long run because it would otherwise take a lot of engineering time and effort just to keep the clusters running. So if your company is considering adopting SpiceDB, I would highly encourage you to have a chat with AuthZed about hosting as well. From our experience, it's well worth the investment.

", + "url": "https://authzed.com/blog/turos-spicedb-success-story-how-the-leading-car-sharing-platform-transformed-authorization", + "title": "Turo's SpiceDB Success Story: How the Leading Car-Sharing Platform Transformed Authorization", + "summary": "Andre, a software engineer at Turo, shared how the world's leading car-sharing platform solved critical security and scalability challenges by implementing SpiceDB with managed hosting from AuthZed Dedicated. This talk was part of the Authorization Infrastructure event hosted by AuthZed on August 20, 2025.", + "image": "https://authzed.com/images/blogs/a5-recap-turo.png", + "date_modified": "2025-09-15T13:49:00.000Z", + "date_published": "2025-09-15T13:49:00.000Z", + "author": { + "name": "Andre Sanches", + "url": "https://www.linkedin.com/in/ansanch" + } + }, + { + "id": "https://authzed.com/blog/authzed-is-5-event-recap-authorization-infrastructure-insights", + "content_html": "

Last month we celebrated AuthZed's fifth birthday with our first-ever \"Authorization Infrastructure Event\" - a deep dive\ninto the technical challenges and innovations shaping the future of access control.

\n

The livestream brought together industry experts from companies like Canva and Turo to share real-world experiences with\nauthorization at scale, featured major product announcements including the launch of AuthZed Cloud, and included\nfascinating discussions with database researchers about the evolution of data infrastructure. From solving the\ndual-write consistency problem to powering OpenAI's document processing, we covered the full spectrum of modern\nauthorization challenges.

\n

Watch the full event recording (2.5 hours)

\n

The Big News

\n

Before we dive into the technical talks, let's start with the big announcements:

\n

AuthZed Cloud is Live

\n

We finally launched AuthZed Cloud - a self-service platform that allows you to provision,\nmanage, and scale your\nauthorization infrastructure on demand. Sign up with a credit card, get your permission system running in minutes, and\nscale as needed - authorization that runs like cloud infrastructure. Through\nour AuthZed Cloud Starter Program, we're\nalso providing credits to help teams try out the platform.

\n

Watch Jake's keynote

\n

AuthZed Powers OpenAI's Data Connectors

\n

OpenAI securely connects enterprise knowledge with ChatGPT by using AuthZed to\nhandle permissions for their corporate data connectors - when ChatGPT connects to your company's Google Drive or\nSharePoint. They've built connectors to process and search over 37 billion documents for more than 5 million\nbusiness users while respecting existing data permissions using AuthZed's authorization infrastructure.

\n

This demonstrates how authorization infrastructure has become critical for AI systems that need to understand and\nrespect complex organizational data permissions at massive scale.

\n

Technical Deep Dives and Customer Stories

\n

Real Talk: The Dual-Write Problem

\n

Artie Shevchenko from Canva delivered an excellent explanation of the dual-write problem that many authorization\nteams face. Anyone who has tried to keep data consistent between two different databases (such as your main database +\nSpiceDB) will recognize this challenge. Watch Artie's full talk

\n

Artie was direct about the reality: the dual-write problem is hard. Here's what teams need to understand:

\n

Things Will Go Wrong

\n
    \n
  • Network calls fail between your database writes
  • \n
  • Race conditions happen when multiple requests hit at once
  • \n
  • Backfill processes create their own special category of chaos
  • \n
\n

Four Ways to Deal With It

\n
    \n
  1. Sync jobs - Run periodic cleanup to fix inconsistencies. Expensive but reliable.
  2. \n
  3. Micro-syncs - Target specific relationships when they change. Faster than full syncs.
  4. \n
  5. Version fields - Add versioning to prevent overwriting newer data. Complex but prevents races.
  6. \n
  7. FIFO queues - Process everything in order. Simple but doesn't scale well.
  8. \n
\n

Canva uses sync jobs as their safety net. Artie's team found that most inconsistencies actually came from bugs in their replication logic, not from the network problems everyone worries about. The sync jobs caught everything and gave them visibility into what was actually happening.

\n

The Real Lesson: Don't try to be clever. Pick an approach, implement it well, and have monitoring so you know when things break.

\n

How Turo Built Authorization That Actually Works

\n

Andre Sanches from Turo told the story of how they moved from \"just share your password with your employees\" to\naccurate fine-grained access controls. Watch Andre's talk

\n

The Problem Was Real\nTuro hosts were sharing account credentials with their team members. Fleet owners needed help managing vehicles, but\nTuro's permission system only understood \"you own it or you don't.\" This created significant security challenges.

\n

The Solution Was Surprisingly Straightforward\nAndre's team built a relationship-based permission system using SpiceDB that supports:

\n
    \n
  • Teams with admin and member roles
  • \n
  • Fine-grained permissions (who can message guests vs. who can see finances)
  • \n
  • Vehicle-level access controls
  • \n
  • Support for pending team invitations
  • \n
\n

The best part? When they needed to add support for inactive team members late in development, it was literally a\none-line schema change. This exemplifies the utility of SpiceDB schemas and authorization as infrastructure.

\n

Two Years Later\nTuro has had exactly one incident with their AuthZed Dedicated deployment in over two years - and that lasted 38 minutes. Andre made it clear: letting AuthZed handle the infrastructure complexity was absolutely worth it. His team focuses on building features, not babysitting databases.

\n

Database Philosophy and Spicy Takes

\n

Professor Andy Pavlo from Carnegie Mellon joined our co-founder Jimmy Zelinskie for a chat about databases, AI,\nand why new data models keep trying to kill SQL. Watch the fireside chat

\n

The SQL Cycle\nAndy's been watching this pattern for decades:

\n
    \n
  1. Someone announces SQL is dead and their new data model is the future
  2. \n
  3. Everyone gets excited about the revolutionary approach
  4. \n
  5. Turns out the new thing solves some problems but creates others
  6. \n
  7. SQL absorbs the useful parts and keeps trucking
  8. \n
\n

Vector databases? Being absorbed into PostgreSQL. Graph databases? SQL 2024 added property graph queries. NoSQL? Most of those companies quietly added SQL interfaces.

\n

The Spiciest Take\nJimmy dropped this one: \"The PostgreSQL wire protocol needs to die.\"

\n

His argument: Everyone keeps reimplementing PostgreSQL compatibility thinking they'll get all the client library benefits for free. But what actually happens is you inherit all the complexity of working around a pretty terrible wire protocol, and you never know how far down the rabbit hole you'll need to go.

\n

Andy agreed it's terrible, but pointed out there's not enough incentive for anyone to build something better. Classic tech industry problem.

\n

AI and Databases\nThey both agreed that current AI hardware isn't radically different from traditional computer architecture - it's just specialized accelerators. The real revolution will come from new hardware designs that change how we think about data processing entirely.

\n

Sneak Peeks from the AuthZed Lab

\n

PostgreSQL Foreign Data Wrapper

\n

Joey Schorr (our CTO) showed off something that made me genuinely excited: a way to make SpiceDB look like regular\nPostgreSQL tables. Watch Joey's demo

\n

You can literally write SQL like this:

\n
SELECT * FROM documents\nJOIN permissions ON documents.id = permissions.resource_id\nWHERE permissions.subject_id = 'user:jerry' AND permissions.permission = 'view'\nORDER BY documents.title DESC;\n
\n

The foreign data wrapper handles the SpiceDB API calls behind the scenes, and PostgreSQL's query planner figures out the optimal way to fetch the data. Authorization-aware queries become just... queries.

\n

AuthZed Materialize Gets Real

\n

Victor Roldán Betancort demonstrated AuthZed Materialize, which precomputes complex permission decisions so SpiceDB\ndoesn't have to traverse complex relationship graphs in real-time. Watch Victor's demo

\n

The demo showed streaming permission updates into DuckDB, then running SQL queries against the materialized permission\nsets. This creates a real-time index of who can access what, without the performance penalty of traversing permission\nhierarchies on every query.

\n

Authorization and MCP Servers

\n

Sam Kim talked about authorization for Model Context Protocol servers and released a reference implementation for a\nMCP server with fine-grained authorization support build in. Watch Sam's MCP talk

\n

The key insight: if you don't build official MCP servers for your APIs, someone else will. And you probably won't like how they handle authorization. Better to get ahead of it with proper access controls baked in.

\n

What We're Thinking About

\n

Irit Goihman (our VP of Engineering) shared some thoughts on how we approach building software. Watch Irit's insights

\n
    \n
  • Bottom-up innovation: Engineers who talk to customers and operate what they build make better decisions
  • \n
  • Responsible AI adoption: We use AI tools extensively, but with humans in the loop and measurable outcomes
  • \n
  • Test coverage through AI: AI-generated test cases with human review have significantly improved our coverage
  • \n
\n

Remote-first engineering teams need different approaches to knowledge sharing and innovation.

\n

Community Love

\n

We recognized the contributors who make SpiceDB a thriving open source project. The community response has been\nexceptional:

\n

Core SpiceDB Contributors:

\n
    \n
  • Kartikay Saxena - Student contributor who's been consistently improving the codebase
  • \n
  • Braden Groom from Reddit - Bringing real-world production experience back to the project
  • \n
  • Jesse White from RELiON - Infrastructure and reliability improvements
  • \n
  • Sean Bryant from GitHub - Core functionality enhancements
  • \n
  • Nicolas Barbey from leboncoin - International perspective and contributions
  • \n
  • Chris Kellendonk from PagerDuty - Monitoring and observability improvements
  • \n
  • Lex Cao - Performance and optimization work
  • \n
  • Meyazhagan - Documentation and developer experience improvements
  • \n
\n

Client Library Heroes (making SpiceDB accessible everywhere):

\n
    \n
  • Danh Tran Thanh - AuthZed-codegen for type-safe Go code generation
  • \n
  • Michael Tanczos - SpiceDB.net bringing authorization to the .NET ecosystem
  • \n
  • Shubham Gupta - AuthZed_ex for Elixir developers
  • \n
  • Ioannis Canellos - Quarkus-AuthZed-client for Java/Quarkus apps
  • \n
  • Lauren (Lurian) - SpiceDB-rust client for the Rust community
  • \n
  • Thomas Richner - SpiceGen for Java client generation
  • \n
  • David Alsbury & Michael O'Connell - Chipotle-rest PHP client (amazing name)
  • \n
  • Link Orb team - Both spicedb-php HTTP client and spicedb-bundle Symfony integration
  • \n
\n

Community Tooling Builders (the ecosystem enablers):

\n
    \n
  • Mohd Ejaz Siddiqui - SpiceDB UI for visual management
  • \n
  • Chris Roemmich & Eytan Hanig - SpiceDB operator Helm charts for Kubernetes
  • \n
  • Nicole Hubbard/Infratographer - Permissions API service layer
  • \n
  • Guilherme Cassolato from Red Hat - Authorino-SpiceDB integration
  • \n
  • Thomas Darimont - OPA-SpiceDB experiments bridging policy engines
  • \n
  • Dominik Guhr from INNOQ - Keycloak-SpiceDB event listener
  • \n
  • Chip - VS Code syntax highlighting for .zed files
  • \n
  • Mike Leone - Tree-sitter grammar for AuthZed schema
  • \n
\n

Every single one of these folks saw a gap and decided to fill it. That's what makes open source communities amazing.

\n

Looking Back, Looking Forward

\n

Five years ago, application authorization was often something that was DIY and hard to scale. Today, companies are\nprocessing billions of permission checks through purpose-built infrastructure.

\n

The next five years? AI agents are going to need authorization systems that don't exist yet. Real-time permission materialization will become table stakes. Integration with existing databases will get so seamless you won't think about it.

\n

Key Takeaways

\n

If you take anything away from our fifth birthday celebration, let it be this:

\n
    \n
  1. Managed authorization infrastructure lets you focus on building features instead of managing database operations
  2. \n
  3. Relationship-based access control can express complex permissions elegantly instead of trying to force everything\ninto roles
  4. \n
  5. Community-driven development makes everyone's authorization better
  6. \n
  7. AI and authorization are going to become inseparable as AI agents are given access to more business data
  8. \n
\n

Authorization infrastructure has gone from \"development requirement\" to \"strategic advantage.\" The companies that figure\nthis out first will have a significant edge in keeping pace with quickening development cycles and heightene security\nneeds.

\n

Thanks to everyone who joined AuthZed for the celebration, and here's to the next five years of fixing access control\nfor everyone.

\n
\n

Want to try AuthZed Cloud? Sign up here and get started in minutes.

\n

Join our community on Discord and\nstar SpiceDB on GitHub.

", + "url": "https://authzed.com/blog/authzed-is-5-event-recap-authorization-infrastructure-insights", + "title": "AuthZed is 5: What We Learned from Our First Authorization Infrastructure Event", + "summary": "We celebrated our 5th birthday with talks from Canva, Turo, and Carnegie Mellon. Here's what we learned about the dual-write problem, scaling authorization in production, and why everyone keeps reimplementing the PostgreSQL wire protocol.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-09-02T18:00:00.000Z", + "date_published": "2025-09-02T18:00:00.000Z", + "author": { + "name": "Corey Thomas", + "url": "https://www.linkedin.com/in/cor3ythomas/" + } + }, + { + "id": "https://authzed.com/blog/authzed-cloud-is-now-available", + "content_html": "

Today marks a special milestone for AuthZed: we're celebrating our 5th anniversary! There are honestly too many thoughts and reflections swirling through my mind to fit into a single blog post. The reality is that most startups don't make it to 5 years, and I'm extremely proud of what we've built together as a team and community.

\n

If you want to hear me reflect on the journey of the past 5 years, I'm giving a talk today about exactly that, and we'll post a link to the recording here when it's ready. But today isn't just about looking back, it's also about looking forward, and I’ve personally been looking forward to launching our next iteration of authorization infrastructure: AuthZed Cloud.

\n

In this blog post, I'll cover what we've built and why, but if you don't need that context and just want to dive in, feel free to bail on this post and sign up right now!

\n

AuthZed Dedicated is the Perfect Product

\n

To understand why we built AuthZed Cloud, I need to first talk about AuthZed Dedicated, because in many ways, Dedicated represents our vision of the perfect authorization infrastructure product.

\n

AuthZed Dedicated is nearly infinitely scalable: capable of handling millions of queries per second when you need it. It's co-located with your workloads, which means there's no internet or cross-cloud latency penalty for your authorization decisions, which are often in the critical path for user interactions. It can run nearly anywhere on earth, with support for all three major cloud providers, giving you the flexibility to deploy where your business needs demand.

\n

Perhaps most importantly, Dedicated provides total isolation for each customer across datastore, network, and compute layers. It marries the best permissions database in the world (SpiceDB) with the best infrastructure design (Kubernetes + operators) to create what we believe is the best authorization infrastructure in the world.

\n

So how did we improve on this formula? We made it more accessible!

\n

AuthZed Dedicated's biggest challenge isn't technical: it's the enterprise procurement cycle that comes with it. The question we kept asking ourselves was: how can we bring these powerful concepts to more companies, especially those who need enterprise-grade authorization but can't navigate lengthy procurement processes?

\n

Introducing AuthZed Cloud

\n

AuthZed Cloud takes the most powerful concepts from AuthZed Dedicated and makes them available in a self-service product that you can start using today.

\n

We've also made several key improvements over what’s available in Dedicated today:

\n

Self-service registration and deployment: No more waiting weeks for procurement approvals or implementation calls. Sign up, configure your permissions system, and start building. Scale when you need to!

\n

Roles: We've added granular access controls that let you limit who can access and change things within your AuthZed organizations. This was a frequent request from teams who needed to federate access to our platform in different ways. You’ll be happy to know that this feature is, of course, also powered by SpiceDB.

\n

Usage-based billing: Instead of committing to fixed infrastructure costs upfront, you can spin up resources on-demand and pay for what you actually use.

\n

The best part? These improvements will also be landing in Dedicated soon, so all our customers benefit!

\n

Delivering on this vision does require some compromises. AuthZed Cloud uses a shared control plane and operates in pre-selected regions (though please let us know if you need a region we don't support today!). But honestly, that's about it for compromises.

\n

For Whom?

\n

AuthZed Cloud is designed for companies of all sizes. Despite the shared infrastructure approach, we've maintained high isolation standards. Your SpiceDB runs as separate Kubernetes deployments, and datastores are dedicated per permissions system. You still get the same scalable technology from Dedicated that allows you to scale up to millions of queries per second when needed, and the same enterprise-grade reliability.

\n

What makes Cloud special is how attainable it is. The base price is a fraction of our base Dedicated deployment price, opening up AuthZed's capabilities to a much broader range of companies.

\n

That said, some organizations should still consider Dedicated. You might consider dedicated if you need higher isolation requirements like an isolated control plane or private networking, or if you need higher flexibility around custom legal terms or deployment in cloud provider regions that AuthZed Cloud doesn't yet support.

\n

Early Access Reception

\n

The response during our early access period has been incredible. There was clearly pent-up demand for a product like this! We've had several long-time AuthZed customers already making the move to Cloud.

\n

Lita Cho, CTO at moment.dev, had this to say:

\n
\n

“We love Authzed—it makes evolving our permissions model effortless, with a powerful schema language, makes rapid\nprototyping possible along with rock-solid production performance, all without heavy maintenance. Authzed Cloud\ndelivers the power and reliability of Dedicated at a startup-friendly price, without the hassle of running SpiceDB. That\nlets me focus on building our modern docs platform, confident our authorization is secure, fast, and future-proof.”

\n
\n

Get Started Today

\n

The best part about AuthZed Cloud is that you can sign up immediately and get started building. We've also set up a program where you can apply for credits to help with your initial implementation and testing.

\n

As we celebrate five years of AuthZed, I'm more excited than ever about the problems we're solving and the direction we're heading. Authorization remains one of the most critical and complex challenges in modern software development, and we're committed to making it accessible to every team that needs it.

\n

Here's to the next five years of building the future of authorization together.

", + "url": "https://authzed.com/blog/authzed-cloud-is-now-available", + "title": "AuthZed Cloud is Now Available!", + "summary": "Bringing the power of AuthZed Dedicated to more with our new shared infrastructure, self-service offering: AuthZed Cloud.", + "image": "https://authzed.com/images/upload/AuthZed-Cloud-Blog@2x.png", + "date_modified": "2025-08-20T16:00:00.000Z", + "date_published": "2025-08-20T16:00:00.000Z", + "author": { + "name": "Jake Moshenko", + "url": "https://www.linkedin.com/in/jacob-moshenko-381161b/" + } + }, + { + "id": "https://authzed.com/blog/predicting-the-latest-owasp-top-10-with-cve-data", + "content_html": "

Predicting the latest OWASP Top 10 with CVE Data from 2022-2025

\n

OWASP is set to release their first Top 10 update since 2021, and this year’s list is one of the most awaited because of the generational shift that is AI. The security landscape has fundamentally shifted thanks to AI being embedded in production systems across enterprises from RAG pipelines to autonomous agents. I thought it would be a fun little exercise to look at CVE data from 2022-2025 and make predictions on what the top 5 in the updates list would look like. Read on to find out what I found.

\n

The OWASP Top 10 List

\n

The OWASP Top 10 is a regularly updated list of the most critical security risks to web applications. It’s a go-to reference for organizations looking to prioritize their security efforts. We’ve always had a keen eye on this list as it’s our mission to fix broken access control.

\n

The last 4 lists have been released in 2010, 2013, 2017 and 2021 with the next list scheduled for release soon, in Q3 2025.

\n

The OWASP Foundation builds this list using a combination of large-scale vulnerability data, community surveys, and expert input. The goal is to create a snapshot of the most prevalent and impactful categories of web application risks. So I thought I’ll crunch some numbers from CVE data that is publicly available.

\n
\n

Methodology

\n

This was not a scientific study — I’m not a data scientist, just an enthusiast in the cloud and security space. The aim here was to explore the data, learn more about how OWASP categories relate to CVEs and CWEs, and see if the trends point toward likely candidates for the upcoming list.

\n

Here’s the process I followed to get some metrics around the most common CVEs:

\n
    \n
  1. \n

    Collect CVEs from 2022–2025

    \n
      \n
    • I pulled data from the NVD (National Vulnerability Database) API. Yearly JSON feeds are available with data from all the CVEs
    • \n
    • Since the last list came out in 2021, I limited the dataset to CVEs with a published date between January 1, 2022 and July 31, 2025.
    • \n
    \n
  2. \n
  3. \n

    Map CWEs to OWASP Top 10 Categories

    \n
      \n
    • Each CVE is linked to one or more CWE (Common Weakness Enumeration) entries. A CWE is a community-developed list of common software and hardware weaknesses.
    • \n
    • I used OWASP’s official CWE mapping (when available) to map the CWEs to entries in the OWASP list.
    • \n
    \n
  4. \n
\n

For example:

\n

CWE-201 - ‘Insertion of Sensitive Information Into Sent Data’ maps to ‘Broken Access Control’.

\n

\"\"

\n
    \n
  • I extracted all the CWE IDs from the dataset and mapped a list of CWE IDs (e.g., \"CWE-201\") to their corresponding OWASP categories
  • \n
\n
def map_cwe_to_owasp(cwe_ids):\n   owasp_set = set()\n   for cwe in cwe_ids:\n       try:\n           cwe_num = int(cwe.replace(\"CWE-\", \"\"))\n           if cwe_num in CWE_TO_OWASP:\n               owasp_set.add(CWE_TO_OWASP[cwe_num])\n       except ValueError:\n           continue\n   return list(owasp_set)\n
\n
    \n
  • Here’s the mapping I made for the Top 8 categories from the 2021 list (truncated for readability, the full code is in the repo)
  • \n
\n
CWE_TO_OWASP = {\n   # A01: Broken Access Control\n   22: \"A01:2021 - Broken Access Control\",\n   23: \"A01:2021 - Broken Access Control\",\n   # ...\n   1275: \"A01:2021 - Broken Access Control\",\n\n\n   # A02: Cryptographic Failures\n   261: \"A02:2021 - Cryptographic Failures\",\n   296: \"A02:2021 - Cryptographic Failures\"\n   # ...,\n   916: \"A02:2021 - Cryptographic Failures\",\n\n\n   # A03: Injection\n   20: \"A03:2021 - Injection\",\n   74: \"A03:2021 - Injection\",\n   # ...\n   917: \"A03:2021 - Injection\",\n\n\n   # A04 Insecure Design\n   73: \"A04:2021 - Insecure Design\",\n   183: \"A04:2021 - Insecure Design\",\n   # ...\n   1173: \"A04:2021 - Insecure Design\",\n\n\n   # A05 Security Misconfiguration\n   2: \"A05:2021 - Security Misconfiguration\",\n   11: \"A05:2021 - Security Misconfiguration\",\n   # ...\n   1032: \"A05:2021 - Security Misconfiguration\",\n    \n   # A05 Security Misconfiguration\n   937: \"A06:2021 - Vulnerable and Outdated Components\",\n   # ...   \n   1104: \"A06:2021 - Vulnerable and Outdated Components\",\n\n\n   # A07:2021 - Identification and Authentication Failures\n   255: \"A07:2021 - Identification and Authentication Failures\",\n   259: \"A07:2021 - Identification and Authentication Failures\",\n   # ...\n   1216: \"A07:2021 - Identification and Authentication Failures\",\n\n\n   # A08:2021 - Software and Data Integrity Failures\n   345: \"A08:2021 - Software and Data Integrity Failures\",\n   353: \"A08:2021 - Software and Data Integrity Failures\",\n   # ...   \n   915: \"A08:2021 - Software and Data Integrity Failures\",\n
\n
    \n
  1. \n

    Map CVEs to CWEs

    \n
      \n
    • The NVD 2.0 dataset embeds weaknesses under cve.weaknesses[].description[].value with CWE IDs like CWE-201. I wrote a script to process the JSON containing NVD vulnerability data to extract CWE IDs for each CVE, and then map it to OWASP categories.
    • \n
    \n
  2. \n
\n
def process_nvd_file(input_path, output_path):\n   with open(input_path, \"r\") as f:\n       data = json.load(f)\n\n\n   results = []\n   for entry in data[\"vulnerabilities\"]:\n       cve_id = entry.get(\"cve\", {}).get(\"id\", \"UNKNOWN\")\n       cwe_ids = []\n\n\n       # Extract CWE IDs from weaknesses\n       for problem in entry.get(\"cve\", {}).get(\"weaknesses\", []):\n           for desc in problem.get(\"description\", []):\n               cwe_id = desc.get(\"value\")\n               if cwe_id and cwe_id != \"NVD-CWE-noinfo\":\n                   cwe_ids.append(cwe_id)\n\n\n       mapped_owasp = map_cwe_to_owasp(cwe_ids)\n\n\n       results.append({\n           \"cve_id\": cve_id,\n           \"cwe_ids\": cwe_ids,\n           \"owasp_categories\": mapped_owasp\n       })\n\n\n   with open(output_path, \"w\") as f:\n       json.dump(results, f, indent=2)\n\n\n   print(f\"Wrote {len(results)} CVE entries with OWASP mapping to {output_path}\")\n
\n

We now have a new JSON file with mapped outputs that has all the CVEs mapped to OWASP categories (if there’s a match). This is what it looks like:

\n
{\n    \"cve_id\": \"CVE-2024-0185\",\n    \"cwe_ids\": [\n      \"CWE-434\",\n      \"CWE-434\"\n    ],\n    \"owasp_categories\": [\n      \"A04:2021 - Insecure Design\"\n    ]\n  },\n  {\n    \"cve_id\": \"CVE-2024-0186\",\n    \"cwe_ids\": [\n      \"CWE-640\"\n    ],\n    \"owasp_categories\": [\n      \"A07:2021 - Identification and Authentication Failures\"\n    ]\n  },\n
\n

I ran this code snippet for each data set from 2022-2025 and had separate JSON files for each year.

\n
    \n
  1. Analyze the Data
  2. \n
\n

Now that we have this data of mapped outputs, we can run some data analysis to find the most common occurrences per year.

\n
    \n
  • I essentially counted the number of CVEs per OWASP category for each year.
  • \n
\n
for filename in os.listdir(DATA_DIR):\n\n# Loads the JSON data from the file, which contains a list of CVE entries.\n\n    year = filename.replace(\"mapped_output_\", \"\").replace(\".json\", \"\")\n    year_path = os.path.join(DATA_DIR, filename)\n\n    with open(year_path, \"r\") as f:\n        entries = json.load(f)\n\n    for entry in entries:\n        for category in entry.get(\"owasp_categories\", []):\n            yearly_data[year][category] += 1\n
\n
    \n
  • To get some visualizations around the data, I sorted and added graphs to see which categories were trending.
  • \n
\n
# Convert to a DataFrame\ndf = pd.DataFrame(yearly_data).fillna(0).astype(int).sort_index()\ndf = df.T.sort_index()  # years as rows\n\n# Save summary\ndf.to_csv(\"owasp_counts_by_year.csv\")\nprint(\"\\nSaved summary to owasp_counts_by_year.csv\")\n\n# Also print\nprint(\"\\n=== OWASP Category Counts by Year ===\")\nprint(df.to_string())\n\n# Plot OWASP trends over time\nplt.figure(figsize=(12, 7))\n\nfor column in df.columns:\n    plt.plot(df.index, df[column], marker='o', label=column)\n\nplt.title(\"OWASP Top 10 Category Trends (2022–2025)\")\nplt.xlabel(\"Year\")\nplt.ylabel(\"Number of CVEs\")\nplt.xticks(rotation=45)\nplt.legend(title=\"OWASP Category\", bbox_to_anchor=(1.05, 1), loc='upper left')\nplt.tight_layout()\nplt.grid(True)\nplt.show()\n
\n

This is what it looked like:

\n

\"\"

\n

Here’s a table with all the data:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
A01: Broken Access ControlA02:\u000b Cryptographic FailuresA03: \u000bInjectionA04: Insecure DesignA05:\u000b Security MisconfigurationA06: Vulnerable & Outdated ComponentsA07: Identification & Authentication FailuresA08: Software & Data Integrity Failures
202240043706496121715111233334
202354984118846148017811357468
2024718244713280192216341430584
2025431420975631056902774418
Totals209981437361855675582847941804
\n

So looking at purely the number of incidences in CVEs, the Top 5 would look like this:

\n

#5 Software and Data Integrity Failures
\n#4 Identification & Authentication Failures
\n#3 Insecure Design
\n#2 Broken Access Control
\n#1 Injection

\n

But wait, OWASP’s methodology in compiling the list involves not just the frequency (how common) but the severity or impact of each weakness. Also, 2 out of the 10 in the list are chosen from a community survey among application security professionals, to compensate for the gaps in public data. In the past OWASP has also merged categories to form a new category. So based on that here’s my prediction for the Top 5

\n

Prediction Time

\n

There’s absolutely no doubt in my mind that the security implications of AI will have a big impact on the list. One point of note is that OWASP released a Top 10 list of LLM in November 2024. Whether they decided to keep the two lists separate or have overlap will largely determine the Top 10 this year.

\n

So looking at the CVE data above (Broken Access Control and Injection had the most occurrences), and the rise of AI in production, here’s what I think will be the Top 5 in the OWASP list this year:

\n

#5 Software and Data Integrity Failures
\n#4 Security Misconfigurations
\n#3 Insecure Design
\n#2 Injection
\n#1 Broken Access Control

\n

With enterprises implementing AI Agents, RAG Pipelines and Model Context Protocol (MCP) in production, access control becomes a priority. Broken Access Control topped the list in 2021, and we’ve seen a slew of high profile data breaches recently so I think it will sit atop the list this year as well.

\n

I asked Jake Moshenko, CEO of AuthZed about his Predictions for the list and while we agreed on the #1 position on the list, there were also a couple of things where we disagreed. Watch the video to find out what Jake thought the Top 5 would look like and which category he thinks might drop out of the Top 10 altogether.

\n\n
\n

Caveats

\n

As I mentioned before, I’m not a data scientist so please feel free to improve upon this methodology in the Github Repo. I also need to state that:

\n
    \n
  • CVE data doesn’t represent all real-world vulnerabilities (e.g., business logic flaws are underreported). Also, vulnerabilities not related to web apps (eg: buffer overflow) were not considered.
  • \n
  • This approach only looks at vulnerability frequency, not impact or exploitability which are factors that OWASP also considers.
  • \n
  • OWASP’s real methodology includes community surveys, and telemetry from industry partners - which wasn’t part of this experiment.
  • \n
\n
\n

Your Turn

\n

What do you think the 2025 OWASP Top 10 will look like?
\nDo you agree with these trends, or do you think another category will spike?
\nI’d love to hear your thoughts in the comments on LinkedIn, BlueSky or Twitter

\n

If you want to replicate this yourself, I’ve put the dataset links and code snippets on GitHub.

", + "url": "https://authzed.com/blog/predicting-the-latest-owasp-top-10-with-cve-data", + "title": "Predicting the latest OWASP Top 10 with CVE data ", + "summary": "OWASP is set to release their first Top 10 update since 2021, and this year’s list is one of the most awaited because of the generational shift that is AI. The security landscape has fundamentally shifted thanks to AI being embedded in production systems across enterprises from RAG pipelines to autonomous agents. I thought it would be a fun little exercise to look at CVE data from 2022-2025 and make predictions on what the top 5 in the updates list would look like. Read on to find out what I found.", + "image": "https://authzed.com/images/blogs/authzed-predict-owasp.png", + "date_modified": "2025-08-13T18:50:00.000Z", + "date_published": "2025-08-13T18:50:00.000Z", + "author": { + "name": "Sohan Maheshwar", + "url": "https://www.linkedin.com/in/sohanmaheshwar/" + } + }, + { + "id": "https://authzed.com/blog/prevent-ai-agents-from-accessing-unauthorized-data", + "content_html": "

I just attended the Secure Minds Summit in Las Vegas, where security and application development experts shared lessons learned from applying AI in their fields. Being adjacent to Black Hat 2025, it's not surprising that a common theme was the security risks of AI agents and MCP (Model Context Protocol). There's an anxious excitement in the community about AI's potential to revolutionize how organizations operate through faster, smarter decision-making, while grappling with the challenge of doing it securely.

\n

Why Permissions Matter in the Age of AI

\n

As organizations explore AI agent deployment, one thing is clear: neither employees nor AI agents should have access to all data. You wouldn't want a marketing AI agent accessing raw payroll data, just as you wouldn't want an HR agent viewing confidential product roadmaps. Without proper access controls, AI agents can create chaos just as easily as they deliver value, since they don't inherently understand which data they should or shouldn't access.

\n

This is where robust permissions systems become critical. Proper access controls ensure AI agents operate within organizational policy boundaries, accessing only data they're explicitly authorized to use.

\n

Watch: How to Implement Access Controls for AI Agents

\n

Sohan, our Lead Developer Advocate at AuthZed, recently explored this topic on the AuthZed YouTube channel with a live demo of implementing AI-aware permissions systems.

\n

Watch the demo here:

\n\n

Authorization Infrastructure for AI: Built for Scale and Safety

\n

In June, we launched AuthZed's Authorization Infrastructure for AI, purpose-built to ensure AI systems respect permissions, prevent data leaks, and maintain comprehensive audit trails.

\n

AuthZed's infrastructure is powered by SpiceDB, our open-source project based on Google's Zanzibar. SpiceDB's scale and speed make it an ideal authorization solution for supporting AI's demanding performance requirements.

\n

Our infrastructure delivers:

\n
    \n
  • Billions of access control lists (ACLs)
  • \n
  • Millions of authorization checks per second
  • \n
  • Global replication across data centers
  • \n
\n

Want to learn more about the future of AuthZed and authorization infrastructure for AI? Join us on August 20th for \"AuthZed is 5: The Authorization Infrastructure Event.\" Register here.

", + "url": "https://authzed.com/blog/prevent-ai-agents-from-accessing-unauthorized-data", + "title": "Prevent AI Agents from Accessing Unauthorized Data", + "summary": "AI agents promise to revolutionize enterprise operations, but without proper access controls, they risk exposing sensitive data to unauthorized users. Learn how AuthZed's Authorization Infrastructure for AI prevents data leaks while supporting millions of authorization checks per second. Watch our live demo on implementing AI-aware permissions systems.\n\n", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-08-08T15:46:00.000Z", + "date_published": "2025-08-08T15:46:00.000Z", + "author": { + "name": "Sam Kim", + "url": "https://github.com/samkim" + } + }, + { + "id": "https://authzed.com/blog/authzed-is-5-authorization-infrastructure-event", + "content_html": "

The Authorization Infrastructure Event

\n

AuthZed is turning five years old, and we're throwing a celebration! On Wednesday, August 20th, we're hosting \"The Authorization Infrastructure Event\" by bringing together experts in authorization and database technology to talk about where this space is headed.

\n

RSVP Here

\n

What We Have Planned

\n

You'll hear from industry experts who've been shaping how we think about authorization:

\n
    \n
  • Artie Shevchenko, Software Engineer at Canva
  • \n
  • Andy Pavlo, Professor, Databaseology, Carnegie Mellon University
  • \n
  • Andre Sanches, Software Engineer, Turo
  • \n
\n

And the AuthZed team will be sharing what we've been building—new product announcements, plus a peek into our lab:

\n
    \n
  • Jake Moshenko, CEO of AuthZed
  • \n
  • Irit Goihman, VP of Engineering at AuthZed
  • \n
  • Sohan Maheshwar, Developer Advocate at AuthZed
  • \n
\n

We’ll be announcing new products that I think will genuinely change how people approach authorization infrastructure and I’m particularly excited to finally share about what we've been exploring in our lab, experimental work that could shape the future of access control.

\n

A Personal Reflection

\n

It's hard to believe but five years have gone by so fast. Back when I joined Jake, Jimmy, and Joey as the first employee, they had this clear understanding of why application authorization was such a pain point for developers, the Google Zanzibar paper as their guide, and an ambitious vision: bring better authorization infrastructure to everyone who needed it.

\n

\"\"

\n

Photo from our first team offsite in 2021. Not pictured: me because I'm taking the photo

\n

Looking back at our journey, some moments that stand out:

\n\n

We've grown from that small founding team to a group of people who genuinely care about solving authorization the right way. Along the way, we've had the privilege of helping everyone from early-stage startups to large enterprises build and scale their applications without the usual authorization headaches.

\n

Join Us

\n

This event is our chance to share our latest work with the community that's supported us, celebrate how far we've all come together, and get a glimpse of what's ahead.

\n

Whether you've been following our journey from the beginning or you're just discovering what we're about, we'd love to have you there. It's going to be the kind of event where you leave with new ideas, maybe some useful insights, and definitely a better sense of where authorization infrastructure is headed.

\n

Want to share a birthday message with us? Record a short message here—we'd genuinely love to hear from you and share some of them during the event.

\n

See you on August 20th!

", + "url": "https://authzed.com/blog/authzed-is-5-authorization-infrastructure-event", + "title": "Celebrate With Us: AuthZed is 5!", + "summary": "AuthZed is turning five years old! Join us Wednesday, August 20th for our Authorization Infrastructure Event, where we're bringing together industry experts and sharing exciting new product developments plus experimental work from our lab.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-07-23T09:36:00.000Z", + "date_published": "2025-07-23T09:36:00.000Z", + "author": { + "name": "Sam Kim", + "url": "https://github.com/samkim" + } + }, + { + "id": "https://authzed.com/blog/coding-with-ai-my-personal-experience", + "content_html": "

I’ve been in tech for over 20 years. I’ve written production code in everything from Fortran to Go, and for the last five of those years, I’ve been a startup founder and CEO. These days, I spend most of my time operating the business, not writing code. But recently, I dipped back in. I needed a new demo built, and fast.

\n

It wasn’t a simple side project. This demo would ideally have multiple applications, all wired into SpiceDB, built with an obscure UI framework, and designed to show off what a real-world, multi-language, permission-aware system looks like. Naturally, I started thinking about who should build it.

\n

Should I ask engineering? Probably not a good idea since I didn’t want to interrupt core product work. What about an intern? Too late in the year for that. Maybe a contractor? I’ve had mixed results there. Skills tend to be oversold, results can fall short, and just finding and vetting someone would take time I didn’t have.

\n

Just prior to this, Anthropic had just released Claude Code and Claude 4. A teammate (with good taste) had good things to say about the development experience, and internet consensus seems to be that (for today at least) Claude is kind for coding models, so I figured I’d give it a try. I’m no novice to working with AI: I have been a paying customer of OpenAI’s since Dall-E and ChatGPT had their first public launches. At AuthZed we also make extensive use of the AI features that are built into some of our most beloved tools, such as: Notion, Zoom, Figma, and GitHub. Many of these features have been helpful, but none felt like a game changer.

\n

Getting Started

\n

At first, I wasn’t sure how much Claude Code could take on. I didn’t know how to structure my prompts or how detailed I needed to be. I started small: scaffold a project, get a “hello world” working, and set up the build system. It handled all of that cleanly.

\n

Encouraged, I got a little overconfident. My prompts grew larger and fuzzier. The quality of output dropped quickly. I also didn’t have a source control strategy in place, and when Claude Code wandered off track, I lost a lot of work. It’s fantastically bad at undoing what it just did! It was a painful but valuable learning experience.

\n

Figuring Out a Process

\n

Eventually, I found my rhythm. I started treating Claude Code like a highly capable but inexperienced intern. I wrote prompts as if they were JIRA tickets: specific, structured, and assuming zero context. I broke the work down into small, clear deliverables. I committed complete features as I went. When something didn’t feel right, I aborted early, git reverted, and started fresh.

\n

\"\"

\n

That approach worked really well.

\n

\"\"

\n

\"\"

\n

By the end of the project, Claude Code and I had built three application analogues for tools that exist in the Google Workspace suite, in three different languages! We wrote a Docs-like in Java, a Groups-like in Go, and a Gmail-like in Javascript, and a frontend coded up in a wacky wireframe widget library called Wired Elements. Each one was connected through SpiceDB, shared a unified view of group relationships, and included features like email permission checks and a share dialog in the documents app. It all ran in Docker with a single command. The entire effort cost me around $75 in API usage.

\n

Check it out for yourself: https://github.com/authzed/multi-app-demo

\n

Could I have done this on my own? Sure, in theory. But I’m not a UI expert, and switching between backend languages would have eaten a lot of time. If I’d gone the solo route, I would’ve likely over-engineered the architecture to minimize how much code I had to write, which might have resulted in something more maintainable, but also something unfinished and way late.

\n

Phenomenal Cosmic Power

\n

\"\"

\n

This was a different experience than I’d had with GitHub Copilot. Sometimes people describe Copilot as “spicy autocomplete”, and that feels apt. Claude Code felt like having a pair programmer who could actually build features with me.

\n

My buddy Jason Hall from Chainguard put it best in a post on LinkedIn: “AI coding agents are like giving everyone their own mech suit.” and “...if someone drops one off in my driveway I'm going to find a way to use it.”

\n

\"\"

\n

For the first time in a long while, I felt like I could create again. As a CEO, that felt energizing. It also made me start wondering what else I could personally accelerate.

\n

Of course, I had some doubts. Maybe this only worked because it was greenfield. Maybe I’d regret not being the expert on the codebase. But the feeling of empowerment was real.

\n

The Next Project: CRM Migration

\n

At the same time, we had a growing need to migrate our sales CRM. We’d built a bespoke system in Notion, modeled loosely after Salesforce. Meanwhile, all of our marketing data already lived in HubSpot. It was time to unify everything.

\n

On paper, this looked straightforward: export from Notion, import into HubSpot. In reality, it was anything but. Traditional CRM migrations are done with flattened CSV files; that wouldn’t play nicely with the highly relational structure we’d built. And with so much existing marketing data in HubSpot, this was more of a merge than a migration.

\n

I’ve been through enough migrations to know better than to try a one-shot cutover. It never goes right the first time, and data is always messier than expected. So I came up with a different plan: build a continuous sync tool.

\n

The idea was to keep both systems aligned while we gradually refined the data. That gave us time to validate everything and flip the switch only when we were ready. Both Notion and HubSpot have rich APIs, so I turned again to Claude Code.

\n

The Results

\n

Over the course of a week, Claude Code and I wrote about 5,000 lines of JavaScript. The tool matched Notion records to HubSpot objects using a mix of exact matching and fuzzy heuristics. We used Levenshtein distance to help with tricky matches caused by accented names or alternate spellings. The tool handled property synchronization and all the API interactions needed to link objects across systems.

\n

The cost came in at around $50 in Claude Code credits.

\n

Could I have done it myself? Technically, yes. But it would have taken me a lot longer. I’m not fluent in JavaScript, and if I had been writing by hand, I would’ve insisted on TypeScript and clean abstractions. That would have been a waste of time for something we were planning to throw away after the migration.

\n

My Take on AI Coding

\n

Our current generation of coding agents are undeniably powerful. Yes, they’re technically still just a next-token predictor, but that description misses the point. It’s like saying Bagger 288 is “just a big shovel.” Sure, but it’s a shovel that can eat mountains.

\n

I now feel confident taking on software projects again in my limited spare time. That’s not something I expected to feel again as a full-time CEO. And the most exciting part? This is probably the worst that these tools will ever be. From here, the tools only get better. Companies like OpenAI, with Codex, and Superblocks are already riffing on other possible user experiences for coding agents. I’m keen to see where the industry goes.

\n

It also seems clear that AI will play a bigger and bigger role in how code gets written. As an API provider, we’re going to need to design for that reality. In the not-too-distant future, our primary users will likely be coding agents, not just humans.

\n

Looking Ahead

\n

We’re in the middle of a huge transformation, not just in software, but across the broader economy. The genie is out of the bottle. Even if the tools stopped improving tomorrow (and I don’t think they will) there’s already enough capability to change the way software gets built.

\n

I’ll admit, it’s a little bittersweet. For most of my career, I have self-identified as a computer whisperer: someone who can speak just the right incantations to make computers (or sometimes whole datacenters) do what I need. But like most workplace superpowers, this one also turned out to be a time-limited arbitrage opportunity.

\n

What hasn’t changed is the need for control. As AI gets more capable, the need for clear, enforceable boundaries becomes more important than ever. The answer to “what should this AI be allowed to do?” isn’t “more AI.” It’s strong, principled authorization.

\n

That’s exactly what we’re building at AuthZed. And you’ll be seeing more from us soon about how we’re thinking about AI-first developer experience and AI-native authorization.

\n

Stay tuned.

", + "url": "https://authzed.com/blog/coding-with-ai-my-personal-experience", + "title": "Coding with AI: My Personal Experience", + "summary": "AuthZed CEO Jake Moshenko shares his experience coding with AI.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-07-16T08:21:00.000Z", + "date_published": "2025-07-16T08:21:00.000Z", + "author": { + "name": "Jake Moshenko", + "url": "https://www.linkedin.com/in/jacob-moshenko-381161b/" + } + }, + { + "id": "https://authzed.com/blog/authzed-cloud-is-coming-soon", + "content_html": "

Here at AuthZed, we are counting down the days until we launch AuthZed Cloud because we are so eager to bring the power of our authorization infrastructure to every company, large and small. If you're just as excited as we are about AuthZed Cloud, sign up for the waitlist. We will be in touch with AuthZed Cloud news, and you'll be the first to know when the product launches.

\n\n

From Enterprise Focus to Self-Service

\n

\"\"

\n

From the start of our journey, we have had a strong focus on serving the needs of authorization at enterprise businesses. Our most popular product, AuthZed Dedicated, is a reflection of that focus as it caters to those looking for dedicated hardware resources and fully-isolated deployment environments. However, not everyone has such strict requirements, and there are many companies who prefer a self-service product where they can sign up, manage their deployments from a single, shared control plane with other users, and pay for dynamic usage with a credit card. The latter is how we consumed most of our high-value services at our last startup when we were building the first enterprise container registry: Quay.io. In fact, you can read more about our journey from Quay to AuthZed here.

\n

Setting New Standards for Authorization Security

\n

The most gratifying part of creating AuthZed has been working alongside so many amazing companies that are changing the landscape of various industries. It's truly validating to see them come to the same conclusion: homegrown authorization solutions are not sufficient for modern businesses. With AuthZed Cloud, we expect to expand the number of companies we can work alongside to set a new standard of security that ensures the safety of all of our private data by fixing access control.

", + "url": "https://authzed.com/blog/authzed-cloud-is-coming-soon", + "title": "AuthZed Cloud is Coming Soon", + "summary": "AuthZed Cloud is coming soon, expanding beyond enterprise-only solutions to offer self-service authorization infrastructure for companies of all sizes. Join our waitlist to be first in line when we launch this game-changing platform.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-07-03T10:31:00.000Z", + "date_published": "2025-07-03T10:31:00.000Z", + "author": { + "name": "Jimmy Zelinskie", + "url": "https://twitter.com/jimmyzelinskie" + } + }, + { + "id": "https://authzed.com/blog/authzed-brings-additional-observability-to-authorization-via-the-datadog-integration", + "content_html": "

Today, AuthZed is providing additional observability capabilities to AuthZed's cloud products with the introduction of our official Datadog Integration. All critical infrastructure should be observable and authorization is no exception. Our integration with Datadog gives engineering teams instant insight into authorization performance, latency, and anomalies—without adding custom tooling or overhead.

\n

With this new integration, customers can now centralize that observability data with the rest of their data in Datadog—giving them the ability to correlate events across their entire platform. AuthZed's cloud products continue to include a web console with out-of-the-box dashboards containing metrics across the various infrastructure components that power a permissions system. At the same time, users of the Datadog integration will also have a mirror of these dashboards available in Datadog if they do not wish to create their own.

\n

\"Authzed

\n

\"Being able to visualize how AuthZed performs alongside our other systems gives us real peace of mind,\" said Eric Zaporzan, Director of Infrastructure, at Neo Financial. \"Since we already use Datadog, it was simple to send AuthZed metrics there and gain a unified view of our entire stack.\"

\n

AuthZed metrics allow developers and SREs to monitor their deployments, including request latency, cache metrics (such as size and hit/miss rates), and datastore connection and query performance. These metrics help diagnose performance issues and fine-tune the performance of their SpiceDB clusters.

\n

Get Started Using Datadog with AuthZed in 7 Steps

\n

The Datadog integration is available in the AuthZed Dashboard under the “Settings” tab on a Permission System.

\n
    \n
  1. Go to the dashboard homepage.
  2. \n
  3. Select a Permission System for which to submit metrics.
  4. \n
  5. Click on the Settings tab.
  6. \n
  7. Scroll down to the Datadog Metrics block of the settings UI.
  8. \n
  9. Enter your Datadog account API key.
  10. \n
  11. Enter your Datadog site if different from the default.
  12. \n
  13. Click Save.
  14. \n
\n

To ensure that the dashboard graph for latency correctly shows the p50, p95, and p99 latencies, you’ll also need to set the Percentiles setting for the authzed.grpc.server_handling metric in the Metrics Summary view to ON.

\n

TADA 🎉 You should see metrics start to flow to Datadog shortly thereafter.

\n

I want to thank all of the AuthZed engineers involved in shipping this feature, but especially Tanner Stirrat who shepherded this project from inception and I can't wait to see all the custom dashboards our customers make in the future!
\n
\nInterested in learning more? Join our Office Hours on July 3rd here on YouTube.

\n

", + "url": "https://authzed.com/blog/authzed-brings-additional-observability-to-authorization-via-the-datadog-integration", + "title": "AuthZed Brings Additional Observability to Authorization via the Datadog Integration", + "summary": "Gain instant visibility into your authorization layer with AuthZed’s new Datadog integration. Stream SpiceDB metrics—latency, cache efficiency, datastore performance, and more—into the dashboards you already trust, so you can correlate events across your stack and troubleshoot faster without extra tooling.", + "image": "https://authzed.com/images/blogs/blog-eng-datadog-integration-hero-2x.png", + "date_modified": "2025-06-24T08:00:00.000Z", + "date_published": "2025-06-24T08:00:00.000Z", + "author": { + "name": "Jimmy Zelinskie", + "url": "https://twitter.com/jimmyzelinskie" + } + }, + { + "id": "https://authzed.com/blog/authzed-announces-support-for-ai-by-providing-permissions-aware-ai", + "content_html": "

Introducing Authorization Infrastructure for AI

\n

Secure your AI systems with fine-grained authorization for RAG pipelines and agents

\n

Today we are announcing Authorization Infrastructure for AI, providing official support for Retrieval-Augmented Generation (RAG) pipelines and agentic AI systems. With this launch, teams building AI into their applications, developing AI products or building an AI company can enforce fine-grained permissions across every stage - from document ingestion to vector search to agent behavior - ensuring data is protected, actions are authorized, and compliance is maintained.

\n

AI is quickly becoming a first-class feature in modern applications. From retrieval-augmented search to autonomous agents, engineering teams are building smarter user experiences by integrating large language models (LLMs) into their platforms.

\n

But with that intelligence comes risk.

\n

AI systems do not just interact with public endpoints. They pull data from sensitive internal systems, reason over embeddings that bypass traditional filters, and trigger actions on behalf of users. Without strong access control, they can expose customer records, cross tenant boundaries, or operate with more agency than intended.

\n

This is the authorization problem for AI. And it is one every team building with LLMs now faces.

\n

Authorization for AI is not optional

\n

When you add AI to your application, you also expand your attack surface. Consider just a few examples:

\n
    \n
  • An LLM that retrieves documents from internal systems but fails to check who is asking
  • \n
  • An agent that books travel but can also access payroll data
  • \n
  • A vector store filled with sensitive documents, exposed through approximate search
  • \n
\n

According to the OWASP Top 10 for LLM Applications, four of the top risks require robust authorization controls as a primary mitigation. And yet, most developers are still relying on brittle, manual enforcement scattered across their codebases.

\n

We believe it’s time for a better solution.

\n

\"AuthZed

\n

Meet AuthZed's Authorization Infrastructure for AI

\n

AuthZed’s authorization infrastructure for AI brings enterprise-grade permission systems to AI workloads. AuthZed has been better positioned to support AI from the get-go because of SpiceDB.

\n

SpiceDB is an open-source Google Zanzibar-inspired database for storing and computing permissions data that companies use to build global-scale fine grained authorization services. Since it is based on Google Zanzibar’s proven architecture, it can scale to massive datasets while handling complex permissions queries. In fact SpiceDB can scale to trillions of access control lists and millions of authorization checks per second.

\n

“AI systems are only as trustworthy as the infrastructure that governs them,\" said Janakiram MSV, industry analyst of Janakiram & Associates. \"AuthZed’s SpiceDB brings proven, cloud-native authorization principles to AI, delivering the control enterprises need to adopt AI safely and at scale.”

\n

Using SpiceDB to enforce access policies at every step of your AI pipeline ensures that data and actions remain properly governed. With AuthZed’s Authorization Infrastructure for AI, teams can safely scale their AI features without introducing security risks or violating data boundaries.

\n

Securing RAG pipelines with fine-grained access control

\n

Retrieval-Augmented Generation improves the usefulness of LLMs by injecting external knowledge. But when that knowledge includes sensitive customer or corporate data, access rules must be enforced at every stage.

\n

AuthZed enables teams to:

\n
    \n
  • Pre-filter content before generating embeddings
  • \n
  • Post-filter vector search results to remove unauthorized documents
  • \n
  • Maintain real-time permission syncs with systems like Google Workspace or SharePoint
  • \n
  • Build permission-aware retrieval layers that balance relevance with compliance
  • \n
\n

Whether you are building with a private knowledge base, CRM data, or support logs, SpiceDB ensures your AI respects the same access controls as the rest of your systems.

\n

Governing agent behavior with clear permission boundaries

\n

AI agents are designed to act autonomously, but autonomy without boundaries is dangerous. With the AuthZed Agentic AI Authorization Model, teams can enforce clear limits on what agents can access and do.

\n

This model includes:

\n
    \n
  • Functionality Control: Define and restrict which tools an agent can use
  • \n
  • Permissions Management: Apply inherited user permissions to agent behavior
  • \n
  • Autonomy Oversight: Introduce approvals for high-impact actions and maintain full audit logs
  • \n
\n

Whether your agent is summarizing data, booking a meeting, or triggering a workflow, it should only ever do what it is explicitly allowed to do.

\n

What this looks like in practice

\n

Let’s say an employee types a natural language query into your internal AI assistant:

\n

“What was our Q3 revenue?”

\n

Without authorization, the assistant might retrieve sensitive board slides or budget drafts and present them directly to the user. No checks, no logs, no traceability.

\n

With AuthZed:

\n
    \n
  • The system checks the employee’s permissions
  • \n
  • Only authorized financial data is retrieved
  • \n
  • An audit log is created for compliance
  • \n
  • AI operates with the same access controls as the rest of the application
  • \n
\n

This is what AuthZed’s Authorization Infrastructure for AI makes possible.

\n

Built for builders

\n

You should not have to choose between building smart features and maintaining secure boundaries. With AuthZed:

\n
    \n
  • Authorization integrates into your AI stack in minutes, not months
  • \n
  • SpiceDB scales with your users, tenants, and access models
  • \n
  • RAG and agent systems become extensions of your existing permission architecture
  • \n
\n

And it is already being used in production. Workday uses AuthZed Dedicated to\nsecure its AI-driven contract lifecycle platform. Other major AI providers rely on SpiceDB to enforce permissions across\nmulti-tenant LLM infrastructure.

\n

Get started quickly

\n

If you are building AI features, AuthZed’s Authorization Infrastructure for AI helps you ship faster by allowing you to focus on your product, instead of cobbling together an authorization solution. Whether you are securing vector search, gating agent behavior, or building out internal tools, AuthZed provides the authorization infrastructure you need.

\n", + "url": "https://authzed.com/blog/authzed-announces-support-for-ai-by-providing-permissions-aware-ai", + "title": "Introducing Authorization Infrastructure for AI ", + "summary": "AuthZed provides permissions systems that help secure and improve your RAG and agentic AI systems", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-06-13T09:00:00.000Z", + "date_published": "2025-06-13T09:00:00.000Z", + "author": { + "name": "Sam Kim", + "url": "https://github.com/samkim" + } + }, + { + "id": "https://authzed.com/blog/introducing-the-authzed-cloud-api", + "content_html": "

Infrastructure for Authorization

\n

For the team at AuthZed, our mission is to fix access control. The first step is creating the foundational infrastructure for others to build their access control systems upon. Infrastructure for Authorization, you say? Didn't infrastructure just go through its largest transformation ever with cloud computing? From introduction to the eventual mass adoption of cloud computing, the industry has had to learn to manage all of the cloud resources they created. In response, cloud providers offered APIs for managing resource lifecycles. Our infrastructure follows this same pattern, so today we're proud to announce the AuthZed Cloud API is in Tech Preview.

\n

AuthZed Cloud API

\n

The AuthZed Cloud API is a RESTful JSON API for managing the infrastructure provisioned on AuthZed Dedicated Cloud. Today, it is able to list the available permissions systems and fully manage the configuration for restricting API-level access to SpiceDB within those permissions systems.

\n

As with all Tech Preview functionality, to get started, you must reach out to your account team and request access. Afterwards, you will be provided credentials for accessing the API. With these credentials, you're free to automate AuthZed Cloud infrastructure in any way you like! We recommend getting started by heading over to Postman to explore the API. Next, why not break out a little bit of curl?

\n

Listing all of your permissions systems:

\n
curl --location 'https://api.$YOUR_AUTHZED_DEDICATED_ENDPOINT/ps' \\\n     --header 'X-API-Version: 25r1' \\\n     --header 'Accept: application/json' \\\n     --header 'Authorization: Bearer $YOUR_CREDENTIALS_HERE' | jq .\u000b[​​{\n   \"id\": \"ps-8HXyWFOzGtk0Yq8dH0GBT\",\n   \"name\": \"example\",\n   \"systemType\": \"Production\",\n   \"systemState\": {\n     \"status\": \"RUNNING\"\n   },\n   \"version\": {\n     \"selectedChannel\": \"Rapid\",\n     \"currentVersion\": {\n       \"displayName\": \"SpiceDB 1.41.0\",\n       \"version\": \"v1.41.0+enterprise.v1\",\n       \"supportedFeatureNames\": [\n         \"FineGrainedAccessManagement\"\n       ]\n     }\n   }\n }]\n
\n

Take note of the required headers: the API requires specifying a version as a header so that changes can be made to the API in the future releases.

\n

I'm eager to see all of the integrations our customers will build with API-level access to our cloud platform! Look out for another announcement coming very soon about an integration that we've built using this new API, too!

\n

Join us on the mission to fix access control.

\n

Schedule a call with us to learn more about how AuthZed can help you.

", + "url": "https://authzed.com/blog/introducing-the-authzed-cloud-api", + "title": "Introducing The AuthZed Cloud API", + "summary": "Announcing the AuthZed Cloud API in Tech Preview—an API for managing AuthZed Dedicated Cloud infrastructure. Following the cloud computing pattern of lifecycle management APIs, this new tool allows you to manage permissions systems and restrict API-level access to SpiceDB within your authorization infrastructure.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-05-28T12:00:00.000Z", + "date_published": "2025-05-28T12:00:00.000Z", + "author": { + "name": "Jimmy Zelinskie", + "url": "https://twitter.com/jimmyzelinskie" + } + }, + { + "id": "https://authzed.com/blog/a-closer-look-at-authzed-dedicated", + "content_html": "

At AuthZed, our mission is to fix broken access control. After years of suffering in industry from insufficient solutions for building authorization systems, we concluded that we'd have to start from the ground up by building the right infrastructure software. SpiceDB, open sourced in late 2021, was our first-step to providing the solution that modern enterprises need. AuthZed Dedicated Cloud, often referred to as simply Dedicated, launched in early 2022 and productized SpiceDB by offering a dedicated cloud platform for provisioning SpiceDB deployments similar to the user experience you'd find provisioning infrastructure on a major cloud provider.

\n

\"\"

\n

What Are Dedicated Clouds?

\n

Dedicated Clouds are a relatively new concept. When AWS hit the market, the term Public Cloud was coined; Public Clouds are cloud platforms that share their underlying hardware resources across a variety of customers. At the same time this term got coined, folks needed a term used to refer to what most folks were already doing before AWS launched: running their own dedicated infrastructure. Unfortunately, instead of calling this Dedicated Cloud, it became known as Private Cloud. So what are Dedicated Clouds? Well, they're the middle ground between Private and Public Clouds; Dedicated Clouds provide varying levels of isolation and dedicated resources than Public Clouds, but aren't placing end users fully in control quite like the traditional Private Cloud. Enterprises in regulated industries, or those that want to isolate particularly sensitive data, increasingly reach for Dedicated Cloud because it can provide most of the niceties of the Public Cloud while also delivering better security.

\n

\"\"

\n

The Evolution of AuthZed Dedicated

\n

When AuthZed looked to create the first commercial offering of SpiceDB, we looked at where the industry was heading and implemented a Serverless product. However, it turned out that most enterprises value peace of mind that comes from isolating their authorization data from a shared data plane with other tenants. This was a happy coincidence because at the same time we learned that the best way to operate low-latency systems is to isolate workloads by having dedicated hardware resources. With our new insights, we launched Dedicated, our \"middleground\" that provided dedicated cloud environments with reserved compute resources and private networking. Dedicated customers get a private control plane deployed into their cloud regions of choice where they can provision their own deployments using our web console, API, or Terraform/OpenTOFU. Remaining true to the Infrastructure-as-a-Service (IaaS) spirit, pricing is done on a resource consumption basis.

\n

Looking Ahead to AuthZed Cloud

\n

Since launch, Dedicated immediately became our flagship product. However, we recognized that some customers didn't require all of its isolation features.These are the same users looking for a self-service product to try things out without a long enterprise sales cycle. Our Serverless product inadvertently fits this description, but it's a limited experience compared to Dedicated. What if we could bridge the gap and bring a version of our Dedicated product where customers could share the control plane? We're calling this AuthZed Cloud (as opposed to AuthZed Dedicated Cloud) and it's under active development and expected to launch later this year. Best of all, because both Cloud and Dedicated will share the same codebase, all of the self-service features we're building will also be coming to Dedicated.

\n

If you are interested in learning more about AuthZed Cloud, you can sign up here for the beta waitlist.

\n

\"\"

", + "url": "https://authzed.com/blog/a-closer-look-at-authzed-dedicated", + "title": "A Closer Look at AuthZed Dedicated", + "summary": "AuthZed tackles broken access control through innovative authorization infrastructure. After launching open-source SpiceDB in 2021, they created AuthZed Dedicated Cloud—offering enterprises the security benefits of private clouds with public cloud convenience. This middle-ground solution provides isolated authorization data processing with dedicated resources, perfect for regulated industries requiring enhanced security.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-05-20T13:00:00.000Z", + "date_published": "2025-05-20T13:00:00.000Z", + "author": { + "name": "Jimmy Zelinskie", + "url": "https://twitter.com/jimmyzelinskie" + } + }, + { + "id": "https://authzed.com/blog/building-better-authorization-infrastructure-with-arm", + "content_html": "

How ARM helps AuthZed build and operate authorization infrastructure, from day-to-day productivity gains to cost-effective, performant cloud compute.

\n

Meeting Modern Development Challenges

\n

Today's cloud-native development environment requires running a growing list of simultaneous services: container orchestration, monitoring, databases, observability tools, and more. For engineering teams, this creates a critical challenge: how to balance performance, cost, and efficiency across both development environments and production deployments.

\n

At AuthZed, we provide flexible, scalable authorization infrastructure—the permissions systems that secure access for your applications’ data and functionality—enabling engineering teams to focus on building what matters—their core products. For our customers using AuthZed's dedicated cloud, the balance of performance, cost, and efficiency is also crucial—they expect a reliable, performant, and cost-effective solution.

\n

ARM architecture has become our strategic advantage in meeting these challenges across our entire workflow.

\n

The ARM Advantage for Development

\n

The availability of ARM-based laptops with customizable configurations and ample RAM has transformed our development environment. Our journey began with ARM processors in early 2022 and expanded to more powerful variants as they became available. The developer community quickly adopted these machines, and tooling and library support rapidly matured, enabling us to fully adopt ARM as our primary architecture in development.

\n

Developer Productivity in Action

\n

At AuthZed, we work with distributed systems and databases daily, and running the full stack locally can be resource-intensive, often requiring significant CPU and memory. ARM's efficient performance helps utilize machine capacity, while its energy efficiency keeps our laptops cool enough to truly stay on laps—even when running our resource-intensive local environment.

\n

After upgrading to higher-performance ARM-based laptops, notable improvements compared to our previous development environment included:

\n
    \n
  • 27% decrease in average container image build times
  • \n
  • 40% decrease in parallel build times for our application stack
  • \n
  • Ability to run our entire application stack locally, including supporting monitoring and observability services
  • \n
\n

The qualitative benefits have been even more significant—true mobility with our laptops due to minimal battery drain and absence of overheating, smoother performance during resource-intensive tasks, and most importantly, tighter feedback loops during debugging and testing.

\n

CI/CD with ARM

\n

AuthZed has been building and publishing multi-architecture Docker images for our tools and authorization database for over three years (since March 2022), so we recognized the value of multi-architecture support in CI/CD early on.

\n

There's now robust support for third-party ARM-based action runners for GitHub Actions, our CI/CD platform. Combined with toolchain maturity across runner images for popular architectures, migration to ARM for CI/CD has never been easier.

\n

Build and test workflows are unique to each project and evolve as the project develops. Consequently, the benefits and tradeoffs for a CI/CD platform change over time. We've benefited from being able to easily migrate between architectures and runner providers to best meet our engineering needs at different stages.

\n

Powering AuthZed Dedicated with ARM

\n

Major providers like Google Cloud, AWS, and Azure have all released custom-designed ARM-based CPUs for their cloud compute platforms. The expanding ARM ecosystem bolsters our multi-cloud strategy for AuthZed Dedicated and allows our production workloads to benefit from ARM's design, which prioritizes high core count and power efficiency under load.

\n

AuthZed Dedicated is our dedicated authorization infrastructure deployed adjacent to customer applications in their preferred cloud platform. This allows for the lowest latency between user applications and our permissions systems, and for the most comprehensive region support. With the availability of ARM-based compute options across the major providers, we are able to take advantage of the economic and performance advantages of ARM-based infrastructure in production:

\n
    \n
  • 20% cheaper compute costs
  • \n
  • 20-25% more efficient CPU usage for our workloads
  • \n
  • 20% higher throughput (based on a load tests at 1 million QPS on AWS Graviton EC2 instances)
  • \n
\n

End-to-End ARM Advantage

\n

From developer laptops to cloud infrastructure, ARM delivers consistent advantages throughout our engineering pipeline. For AuthZed, it's now our preferred platform for building and running authorization infrastructure that helps customers secure applications with confidence and scale efficiently.

\n

The combination of developer productivity, cost efficiency, and performance gains enables our growing startup to innovate and compete effectively. As cloud providers continue expanding ARM-based offerings and development tools mature further, we expect these advantages to compound, creating even more opportunities to deliver value through our authorization infrastructure.

\n

By embracing ARM across development and production environments, we've created a seamless experience that benefits both our team and our customers—accelerating development while delivering more performant and cost-effective services.

\n

Curious about the inspiration behind AuthZed’s modern approach to authorization? Explore the Google Zanzibar research paper with our annotations and foreword by Kelsey Hightower to learn how it all began.
\nhttps://authzed.com/z/google-zanzibar-annotated-paper

", + "url": "https://authzed.com/blog/building-better-authorization-infrastructure-with-arm", + "title": "Building Better Authorization Infrastructure with ARM: Benefits from Laptop to Cloud", + "summary": "How ARM helps AuthZed build and operate authorization infrastructure, from day-to-day productivity gains to cost-effective, performant cloud compute.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-05-14T13:00:00.000Z", + "date_published": "2025-05-14T13:00:00.000Z", + "author": { + "name": "Sam Kim", + "url": "https://github.com/samkim" + } + }, + { + "id": "https://authzed.com/blog/zed-v0-30-2-release", + "content_html": "

Zed is the command line interface (CLI) tool that you can use to interact with your SpiceDB cluster. With it you can easily switch between clusters, write and read schemas, write and read relationships, and check for permissions. It can be launched as a standalone binary or as a Docker container. Detailed installation options documented here.

\n

Improvements in v0.30.2

\n

Over the last few months we’ve been making many improvements to it, such as:

\n
    \n
  • Adding support for compilation and validation of composable schemas
  • \n
  • Adding automatic retries
  • \n
  • Adding a new zed backup command
  • \n
  • Publishing the package to Chocolatey for all Windows users (currently in review)
  • \n
\n

And many other small fixes that are too many to list here. We are happy to announce that last week we released zed v0.30.2, which includes all of these changes.

\n

In the near future we expect to be adding support for a new test syntax in schema files, which will allow you to validate that your schema and relationships work as you expect them to. Stay tuned!

\n

As you can see, we are continuously making improvements to zed. If you see anything not working as expected, or if you have an idea for a new feature, please don’t hesitate to open an issue in https://github.com/authzed/zed. Also, while you’re at it, please give us a star!

", + "url": "https://authzed.com/blog/zed-v0-30-2-release", + "title": "Zed v0.30.2 Release", + "summary": "Zed CLI provides seamless interaction with SpiceDB clusters, allowing you to manage schemas, relationships, and permissions checks. Our v0.30.2 release adds composable schema support, automatic retries, backup functionality, and upcoming Windows package integration via Chocolatey.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-05-01T11:12:00.000Z", + "date_published": "2025-05-01T11:12:00.000Z", + "author": { + "name": "Maria Inés Parnisari", + "url": "https://github.com/miparnisari" + } + }, + { + "id": "https://authzed.com/blog/kubecon-europe-2025-highlights-navigating-authorization-challenges-in-fintech-with-authzeds-jimmy-zelinskie-and-pierre-alexandre-lacerte-from-upgrade", + "content_html": "

Navigating Authorization Challenges in FinTech with Jimmy Zelinskie and Pierre-Alexandre Lacerte

\n

At this year's KubeCon + CloudNativeCon Europe 2025 in London, AuthZed CPO Jimmy Zelinskie sat down with Pierre-Alexandre Lacerte, Director of Software Development at Upgrade, for an insightful discussion on modern authorization challenges and solutions. The interview, hosted by Michael Vizard of Techstrong TV, covers several key topics that should be on every developer's radar.

\n

Watch the Full Interview

\n

Before diving into the highlights, you can watch the complete interview on Techstrong TV here. It's packed with valuable insights for anyone interested in authorization, security, and cloud-native architectures.

\n

Key Highlights from the Conversation

\n

Origins of SpiceDB and the Zanzibar Approach

\n

Jimmy shares the origin story of AuthZed, explaining how his experience building Quay (one of the first private Docker registries) revealed fundamental challenges with authorization:

\n
\n

\"When you think about it, the only thing that makes a private Docker registry different from like a regular Docker registry where anyone can pull any container down is literally authorization... the core differentiator of that product was authorization.\"

\n
\n

The turning point came when Google published the Zanzibar paper in 2019:

\n
\n

\"We read this paper and said, this is actually how you're supposed to solve these problems. This would have solved all the problems we had building Quay.\"

\n
\n

What is Relationship-based Access Control?

\n

One of the most valuable segments of the interview explains the concept of relationship-based access control:

\n
\n

\"The approach in the Zanzibar paper is basically this idea of relationship-based access control, which is not how most people are doing things today. The idea is essentially that you can save sets of relationships inside of a database and then query that later to determine who has access.\"

\n
\n

Jimmy illustrates this with a simple example that makes the concept accessible:

\n
\n

\"Jimmy is a part of this team. This team has access to this resource. And then if I can find that chain from Jimmy through the team to that resource, that means Jimmy has access to that resource transitively through those relationships.\"

\n
\n

Why Upgrade Chose Not to Build In-House

\n

Pierre-Alexandre explains the decision-making process that led Upgrade to adopt SpiceDB rather than building an in-house solution:

\n
\n

\"We're a fintech, so we offer personal loans, checking accounts. But eventually we started developing more advanced products where we had to kind of change the foundation of our authorization model... we're kind of not that small, but at the same time we cannot allocate like 200 engineers on authorization.\"

\n
\n

Their evaluation involved looking at industry leaders:

\n
\n

\"We started looking at a few solutions actually, and then also the landscape, like what is GitHub doing? What is the Carta, Airbnb doing?... a lot of those solutions were kind of hedging into the direction of Zanzibar or Zanzibar-ish approach.\"

\n
\n

The Power of Centralization

\n

The interview highlights a critical advantage of centralized authorization systems:

\n
\n

\"The real end solution to all that is centralization. If there's only one system of record, it's really easy to make sure you've just removed that person from the one single system of record.\"

\n
\n

Pierre-Alexandre describes how Upgrade implemented this approach:

\n
\n

\"When someone leaves the company or when someone changes teams, we do have automation that would propagate the changes across the applications you have access to down to the SpiceDB instance. So we have this kind of sync infrastructure that makes sure that this is replicated within a few seconds.\"

\n
\n

Cloud-Native Requirements

\n

For companies operating in regulated industries like fintech, having a cloud-native solution is essential. Pierre-Alexandre emphasizes:

\n
\n

\"We're on Amazon EKS, so Kubernetes Foundation... For us, finding something that was cloud native, Kubernetes native was very important.\"

\n
\n

Authorization for AI: The Next Frontier

\n

One of the most forward-looking parts of the discussion addresses the intersection of authorization and AI:

\n
\n

\"The real kind of question is actually applying authorization to AI and not vice versa... now with AI, we don't have that same advantage of it just being like a couple folks. If you train a model or have tons of embeddings around your personal private data, now anyone querying that LLM has access to all that data at your business.\"

\n
\n

Upgrade is already exploring solutions:

\n
\n

\"In our lab, we're exploring different patterns, leveraging SpiceDB where we have a lot of internal documentation and the idea is to ingest those documents and tag them on SpiceDB and then leveraging some tools in the GenAI space to query some of this data.\"

\n
\n

The Bottom Line: Don't Build Your Own Authorization

\n

Perhaps the most quotable moment from the interview is Jimmy's passionate plea to developers:

\n
\n

\"If there's like one takeaway from kind of us building this business, it's that folks shouldn't be building their own authorization. Whether the tool is SpiceDB that they end up choosing or another one, like developers, they wouldn't dream of building their own database when they're building their applications. But authorization systems, they've been studied and researched and written about in computer science since the exact same time. Yet every developer thinks they can write custom code for each app implementing custom logic for a thing they don't have no background in, right? And I think this is kind of just like preposterous.\"

\n
\n

Pierre-Alexandre adds a pragmatic perspective from the customer side:

\n
\n

\"Obviously, I probably have decided to go with SpiceDB sooner. But yeah, I mean, we had to do our homework and learn.\"

\n
\n

Beyond the Highlights

\n

The full interview covers additional topics not summarized here, including:

\n
    \n
  • The distinction between authentication and authorization (and why the terms are confusing)
  • \n
  • Security implications of centralized authorization
  • \n
  • Enterprise features for enhanced control and monitoring
  • \n
  • How SpiceDB handles audit logging and security events
  • \n
\n

Join the Conversation

\n

Interested in learning more about modern authorization approaches after watching the interview?

\n\n

Don't miss this insightful conversation that challenges conventional wisdom about authorization and provides a glimpse into how forward-thinking companies are approaching these challenges. Watch the full interview now →

", + "url": "https://authzed.com/blog/kubecon-europe-2025-highlights-navigating-authorization-challenges-in-fintech-with-authzeds-jimmy-zelinskie-and-pierre-alexandre-lacerte-from-upgrade", + "title": "Techstrong.tv Interview with Jimmy Zelinskie and Pierre-Alexandre Lacerte from Upgrade", + "summary": "Watch AuthZed CPO Jimmy Zelinskie and Upgrade's Pierre-Alexandre Lacerte discuss modern authorization challenges, relationship-based access control, and why companies shouldn't build their own authorization systems in this insightful KubeCon Europe 2025 interview with Techstrong.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-04-08T16:15:00.000Z", + "date_published": "2025-04-08T16:15:00.000Z", + "author": { + "name": "Sam Kim", + "url": "https://github.com/samkim" + } + }, + { + "id": "https://authzed.com/blog/meet-dibs-the-mascot-bringing-spicedb-to-life", + "content_html": "

We're pleased to introduce you to the official SpiceDB mascot – the Muad'dib, or Dibs for short. As we prepare for KubeCon + CloudNativeCon EU in London, we're unveiling this distinctive character who will represent our project in meaningful ways.

\n

\"\"

\n

Why a Muad'dib?

\n

The name \"Muad'dib\" continues our tradition of referencing Frank Herbert's Dune series. For those unfamiliar with Dune, the Muad'dib is a small desert mouse known for its resilience and adaptability—qualities we strive to incorporate into SpiceDB.

\n

With its distinctive oversized ears and agile movements, the Muad'dib is far more than just a charming emblem. In the unforgiving desert, every step matters, and this remarkable creature's fast, efficient navigation mirrors how SpiceDB processes complex data in real time. Those attentive ears serve as a reminder to remain vigilant and responsive, embodying survival instincts honed in the harshest environments.

\n

Much like SpiceDB's approach to authorization challenges, the Muad'dib transforms obstacles into opportunities. This desert-dwelling creature represents our commitment to resilience, speed, and a collaborative spirit – all values that drive SpiceDB forward in the cloud-native ecosystem.

\n

Dibs at KubeCon + CloudNativeCon EU

\n

We will be at KubeCon + CloudNativeCon in London so stop by our booth #: N632 to pick up your very own Dibs swag.

\n

And join us for our scheduled activities:

\n

April 2, 2025 - Live AMA

\n

Kelsey Hightower AMA at our booth #: N632

\n

\"\"

\n

April 3, 2025 - Party hosted by AuthZed

\n

Come party with AuthZed, Spotify, Rootly and Infiscal at the Munich Cricket Club Canary Wharf.

\n

\n

\"\"

\n

We would love to talk with you about how we can help fix your access control and provide the infrastructure necessary to support your applications.

\n

Get Involved with Dibs and SpiceDB

\n

We look forward to seeing how our community connects with Dibs the Muad'dib. Here's how you can get involved:

\n
    \n
  1. Follow Us Online: We post project updates and industry news regularly on LinkedIn, BlueSky, and X.
  2. \n
  3. Visit Us at KubeCon: Stop by our booth #: N632 at KubeCon + CloudNativeCon EU in London to see Dibs in person and collect exclusive Muad'dib merchandise.
  4. \n
  5. Contribute to SpiceDB: Consider contributing to SpiceDB and continue the development of our open source project.
  6. \n
  7. Join Our Community: Connect with other SpiceDB users and developers in our Discord server.
  8. \n
\n

This creature represents not just our project, but the spirit of our community – adaptable, resilient, and ready to navigate complex challenges.

\n

Welcome, Dibs.

", + "url": "https://authzed.com/blog/meet-dibs-the-mascot-bringing-spicedb-to-life", + "title": "Meet Dibs: The Mascot Bringing SpiceDB to Life", + "summary": "Meet Dibs the Muad'dib, SpiceDB's new mascot that embodies our commitment to resilience, adaptability, and precision in solving complex authorization challenges. Drawing inspiration from Frank Herbert's Dune universe, this vigilant desert creature symbolizes how SpiceDB navigates the harsh terrain of modern access control with efficiency and intelligence.", + "image": "https://authzed.com/images/upload/blog-meet_dibs-2x.png", + "date_modified": "2025-03-25T12:17:00.000Z", + "date_published": "2025-03-25T12:17:00.000Z", + "author": { + "name": "Corey Thomas", + "url": "https://www.linkedin.com/in/cor3ythomas/" + } + }, + { + "id": "https://authzed.com/blog/the-evolution-of-expiration", + "content_html": "

Feature Highlight: Relationship Expiration

\n

We are excited to announce that as of the SpiceDB v1.40 release, users now have access to a new experimental feature: Relationship Expiration. When writing relationships, requests can now include an optional expiration time, after which a relationship will be treated as removed, and eventually automatically cleaned up.

\n

The evolution of expiration

\n

Even when first setting out to create SpiceDB, there was never any doubt whether or not users would want time-bound access control to their resources. However, the inspiration for SpiceDB, Google's Zanzibar system, has no public documentation for how this functionality is built. As our initial goals for the SpiceDB project were to be as faithful to Google's design as possible, we initially left expiration as an exercise to the user.

\n

Without explicit support within SpiceDB, users could still use external systems like workflow engines (e.g. Temporal) to schedule calls to the SpiceDB DeleteRelationships or WriteRelationships APIs in order to solve this problem. This is a perfectly valid way to solve this problem, but it has a major tradeoff: users must adopt yet another system to coordinate their usage of the SpiceDB API.

\n

After we had successfully reached our goal of being the premier implementation of the concepts expressed in the Google Zanzibar paper, we turned our focus to improving developer experience and more real-world requirements outside of the walls of Google. This led us to collaborating with Netflix on a system for supporting lightweight policies to more effectively model ABAC-style use cases. This design came to be known as Relationship Caveats. Caveats allow SpiceDB users to write conditional relationships that exist depending on whether a CEL expression evaluates to true while their request is being processed. With the introduction of Caveats, SpiceDB had its first way to create time-bounding without relying on any external system. The use case was so obvious, even our first examples of Caveats demonstrated how to implement time-bounded relationship expiration.

\n

As more SpiceDB users adopted Caveats, we began to acknowledge some trends in its usage. Many folks didn't actually need or want the full expressiveness of policy; instead they cared solely about modelling expiration itself. Eventually it became obvious that expiration was its own fully-fledged use case. If we could craft an experience specifically for expiration, we could steer many folks away from some of the tradeoffs associated with caveats. If you still need caveats for reasons other than expiration and you're wondering if relationships support both caveats and expiration simultaneously, they do!

\n

What's going on under-the-hood for Relationship Expiration?

\n

If you've spent time reading some of the deeper discussions on SpiceDB internals or studying other systems, you might be familiar with the fact that time is incredibly nebulous in distributed systems. Distributed systems typically eschew \"wall clocks\" altogether. Instead, for correctness they need to model time based on the ordering of events that occur in the system. This observation, among others, ultimately led Leslie Lamport to win a Turing Award. SpiceDB is no exception to this research: the opaque values encoded into SpiceDB's ZedTokens act as logical clocks used to provide consistency guarantees throughout the system.

\n

If the problem here isn’t already clear: fundamentally, relationship expiration is tied to wall clock time, but distributed systems research proves this is a Bad Idea™. In order to avoid any inconsistencies caused by the skew in synchronization of clocks across machines, SpiceDB implements expiration by pushing as much logic into the underlying datastore as possible. For a datastore like PostgreSQL, there is no longer a synchronization problem because there's only one clock that matters: the one on the leader's machine. Some datastores even have their own first-class expiration primitives that SpiceDB can leverage in order to offload this logic entirely while ensuring that the removal of relationships are done as efficiently as possible. This strategy is only possible because of SpiceDB's unique architecture of reusing other existing databases for its storage layer rather than the typical disk-backed key-value store.

\n

Trying out Relationship Expiration

\n

There are only a few steps required to try out expiration once you've upgraded to SpiceDB v1.40:

\n
    \n
  1. Enable the experimental flag when running SpiceDB
  2. \n
\n
spicedb serve --enable-experimental-relationship-expiration [...]\n
\n
    \n
  1. Annotate that you want to use first-class expiration in your schema.
  2. \n
\n
use expiration\u000b\u000b\n\ndefinition folder {}\u000b\ndefinition resource {\n  relation parent: folder\n}\n
\n
    \n
  1. Annotate the relations in your schema where you want to support expiration.
  2. \n
\n
use expiration\u000b\u000b\n\ndefinition folder {}\u000b\n  definition resource {\n  relation parent: folder with expiration\n}\n
\n
    \n
  1. Provide a timestamp for the `OptionalExpiresAt` field when writing relationships.
  2. \n
\n
WriteRelationshipsRequest { Updates: [\n  RelationshipUpdate {\n    Operation: CREATE\n    Relationship: {\n      Resource: { ObjectType: \"resource\", ObjectId: \"123\", },\n      Relation: \"parent\",\n      Subject: { ObjectType: \"folder\", ObjectId: \"456\", },\n      OptionalExpiresAt: \"2025-12-31T23:59:59Z\"\n      }\n    }]\n}\n
\n

The journey for peak performance continues

\n

Relationship Expiration is a great example of our never-ending journey to achieve the best possible performance for SpiceDB users. As SpiceDB is put to the test in an ever-increasing number of diverse enterprise use-cases, we learn new things about where optimizations should be made in order to deliver the best product for scaling authorization. Sometimes it requires going back to the drawing board on a problem we thought we had previously solved and totally reconsidering its design. With that, I encourage you to go out and experiment with Relationship Expiration so that we learn even more about the problemspace and continue refining our approach.

", + "url": "https://authzed.com/blog/the-evolution-of-expiration", + "title": "The Evolution of Expiration", + "summary": "We are excited to announce that as of the SpiceDB 1.40 release, users now have access to a new experimental feature: Relationship Expiration. When writing relationships, requests can now include an optional expiration time, after which a relationship will be treated as removed, and eventually automatically cleaned up.", + "image": "https://authzed.com/images/blogs/blog-eng-relationship-expiration-hero-2x.png", + "date_modified": "2025-02-13T10:16:00.000Z", + "date_published": "2025-02-13T10:16:00.000Z", + "author": { + "name": "Jimmy Zelinskie", + "url": "https://twitter.com/jimmyzelinskie" + } + }, + { + "id": "https://authzed.com/blog/build-time-bound-permissions-with-relationship-expiration-in-spicedb", + "content_html": "

Today we are announcing the experimental release of Relationship Expiration, which is a straightforward, secure, and dynamic way to manage time-bound permissions directly within SpiceDB.

\n

Building secure applications is hard, especially when it comes to implementing temporary access management for sensitive data. You need to grant the right level of access to the right people for the right duration, without creating long-term vulnerabilities or drowning in administrative overhead.

\n

Consider the last time you needed to give a contractor access to your brand guidelines, a vendor access to a staging environment, or a new employee access to onboarding materials. The usual workarounds – emailing files, uploading to external systems, or (please, please don’t) sharing logins – quickly become a tangled mess of version control nightmares, security risks, and administrative headaches. And what happened when you completed the project? How did you guarantee that access gets promptly revoked? Leaving lingering access privileges hanging around is an AppSec war room waiting to happen.

\n

We’re helping application development teams solve this problem with this powerful new feature in SpiceDB v1.40.

\n

\"Authorization is essential for building secure applications with advanced sharing capabilities,\" said Larry Carvalho, Principal Consultant and Founder at RobustCloud. \"SpiceDB, inspired by Google's approach to authorization, provides developers with a much-needed feature for managing fine-grained access control. By leveraging AuthZed’s expertise, developers can build the next generation of applications with greater efficiency, security, and flexibility.\"

\n

Beyond workarounds: a first class solution

\n

While workarounds exist – scheduling API calls with external tools like Temporal or crafting complex policies – they add complexity and can be difficult to manage and deploy at scale (think 10,000 relationships generated and refreshed every 10 minutes). SpiceDB's Relationship Expiration provides first-class support for building time-bound permissions, leveraging SpiceDB’s powerful relationship-based approach.

\n

As the name suggests, expirations are attached as a trait to relationships between subjects and resources in SpiceDB’s graph-based permissions evaluation engine. Once the relationship expires, SpiceDB automatically removes it. Without this built-in support, conditional time-bound relationships in a Zanzibar-style schema clutter the permissions graph, bloating the system and impacting performance.

\n

Why you should be building time-bound permissions (with SpiceDB)

\n

Collaborate productively and securely

\n

Time-bound access helps teams to collaborate securely and efficiently. By eliminating the friction of manual access management, it frees up valuable time and resources while minimizing the risk of human error. Knowing that access will automatically expire fosters a culture of confident sharing, removing the hesitation that can lead to information silos and slower project cycles. Additionally, just-in-time access with session-based privileges streamlines workflows and minimizes the risk of unauthorized access.

\n

Dynamic permissions

\n

Put access control in the hands of your users: they can define expiration limits for the resources they manage, unlocking powerful workflows like time-limited review cycles or project-based access. A designer, for example, could grant edit access to a file for a specific review period, with access automatically revoked afterward. This granular control enhances security by minimizing the window of opportunity for unauthorized access and fosters a culture of security awareness. Leave a positive impression with custom permissions options that welcome a broad range of use cases.

\n

Optimize permissions systems

\n

With millions of users and billions of resources, authorization can become a major performance bottleneck, especially since permissions checks sit in the critical path between user input and service response. By automatically removing expired relationships, SpiceDB reduces the size of its database and load on its system, leading to more performant authorization checks and lower costs.

\n

Learn more today

\n

Want to learn more TODAY? Join Sohan, AuthZed technical evangelist, and Joey Schorr, one of the founders of AuthZed, during our biweekly Office Hours livestream at 9 am PT / 12 pm ET on February 13th! We hope to see you there.

\n\n

Or, hop over to Jimmy Zelinskie’s blog post to learn more about how to implement expiring relationships and try them out in SpiceDB today.

\n

Don’t let relationships linger past their expiration date!

\n

You may have noticed that we've lined up this launch just in time for Valentine’s Day. Most relationships between humans do, sadly, have an expiration date… To recognize the (somewhat) unfortunate timing of this release, we’ve compiled a Spotify list of songs sourced from the AuthZed team just for those nursing broken hearts this season. And if you’re one of the lucky ones celebrating, hey, it’s fun music to jam to while you learn SpiceDB.

\n\n

If you haven’t already, give SpiceDB a star on GitHub, or follow us on LinkedIn, X, or BlueSky to stay up to date on all things AuthZed. Or ready to get started? Schedule a call with us to talk about how we can help with your authorization needs.

", + "url": "https://authzed.com/blog/build-time-bound-permissions-with-relationship-expiration-in-spicedb", + "title": "Build Time-Bound Permissions with Relationship Expiration in SpiceDB", + "summary": "Today we are announcing the experimental release of Relationship Expiration, which is a straightforward, secure, and dynamic way to manage time-bound permissions directly within SpiceDB. \n", + "image": "https://authzed.com/images/blogs/blog-relationship-expiration-hero-2x.png", + "date_modified": "2025-02-13T10:16:00.000Z", + "date_published": "2025-02-13T10:16:00.000Z", + "author": { + "name": "Jess Hustace", + "url": "https://twitter.com/_jessdesu" + } + }, + { + "id": "https://authzed.com/blog/deepseek-balancing-potential-and-precaution-with-spicedb", + "content_html": "

DeepSeek has emerged as a phenomenon since its announcement in late December 2024 by hedge fund company High-Flyer. The AI industry and general public have been captivated by both its capabilities and potential implications.

\n

Security has been at the forefront of recent conversation due to reports from Wiz that the DeepSeek database is leaking sensitive information, including chat history as well as geopolitical concerns. Even RedMonk analyst Stephen O’Grady discussed DeepSeek and the Enterprise focusing on considerations for business adoption.

\n

At AuthZed, we recognize that trust and security fundamentally shape how organizations evaluate AI models, which is why we're sharing our perspective on this crucial discussion.

\n

The DeepSeek Phenomenon

\n

What makes DeepSeek particularly noteworthy is its unique combination of features. As an open-source model, it demonstrates performance comparable to frontier models from industry leaders like OpenAI and Anthropic, yet achieves this with (reportedly) significantly lower training costs. The R1 version exhibits impressive reasoning capabilities, further challenging conventional assumptions about the infrastructure investments required for advancing LLM performance.

\n

Balancing Potential and Precaution

\n

While these factors drive DeepSeek’s popularity, they’ve also drawn skepticism alongside geopolitical considerations based on DeepSeek’s origin. The uncertainty surrounding the source of training data and potential biases in responses warrants careful consideration. A recent data breach of the hosted service has heightened privacy concerns, particularly given the official hosted service’s terms of service permit user data retention for future model training.

\n

Despite the concerns, users and companies increasingly express interest in exploring its capabilities. Organizations seeking to leverage DeepSeek's capabilities while maintaining data security can adopt permissions systems to define data access controls. This strategy is especially relevant for applications built on DeepSeek's large language models, where protecting sensitive information is paramount.

\n

SpiceDB: A Solution for Secure AI Integration

\n

SpiceDB offers a robust framework for organizations integrating AI capabilities. Its fine-grained permissions help avoid oversharing by letting you precisely define which data the model can and cannot access. This granular control extends beyond data access - you can prevent excessive agency by explicitly defining the scope of actions a DeepSeek-based agent is permitted to take. This dual approach to security - controlling both data exposure and action boundaries - makes SpiceDB particularly valuable for organizations that want to leverage DeepSeek’s capabilities but in a controlled environment.

\n

Practical Implementation

\n

To help organizations get started, we've created a demo notebook showcasing SpiceDB integration with a DeepSeek-based RAG system: https://github.com/authzed/workshops/tree/deepseek/secure-rag-pipelines

\n

For further exploration and community support, join our SpiceDB Discord community to connect with other developers implementing secure AI applications.

", + "url": "https://authzed.com/blog/deepseek-balancing-potential-and-precaution-with-spicedb", + "title": "DeepSeek: Balancing Potential and Precaution with SpiceDB", + "summary": "DeepSeek has emerged as a phenomenon since its announcement in late December 2024 and security has been at the forefront of recent conversation. At AuthZed, we recognize that trust and security fundamentally shape how organizations evaluate AI models, which is why we're sharing our perspective on this crucial discussion.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-01-31T07:56:00.000Z", + "date_published": "2025-01-31T07:56:00.000Z", + "author": { + "name": "Sam Kim", + "url": "https://github.com/samkim" + } + }, + { + "id": "https://authzed.com/blog/2024-soc2-reflection", + "content_html": "

I'm happy to announce that AuthZed recently renewed our SOC2 compliance and our SOC2 Type 2 and SOC3 reports are now available on security.authzed.com.

\n

Having just endured the audit process again, I figured it would be a good time to reflect on my personal feelings toward compliance and how my opinion has evolved.

\n

An unbiased description of SOC2

\n

If you're reading this now and aren't familiar with SOC2 and SOC3, I'll give you an overview by someone that isn't trying to sell you a compliance tool (feel free to skip this section):

\n

SOC (System and Organization Controls) is a suite of annual reports that result from conducting an audit of the internal controls that you use to guarantee security practices at your company. An example of an \"internal control\" is a company-wide policy that enforces that \"all employees have an anti-virus installed on their devices\". Controls vary greatly and can be automated by using software like password managers and MDM solutions, but some will always require human intervention, such as performing quarterly security reviews and annual employee performance reviews.

\n

In the tech industry, SOC2 is the standard customers expect (or ISO27001 if you're in the EU, but they are similar enough that you often only need either one). As I wrote this, it came to my attention that I have no idea what SOC1 is, so I looked it up to discover that it is apparently a financial report which I've never heard of customers requesting in the tech industry. SOC3 is a summary of a SOC2 report that contains less detail and is designed to be more publicly sharable so that you don't necessarily need to sign an NDA to get some details. SOC2 comes in two variants \"Type 1\" and \"Type 2\". It's fairly confusing, but this is just shorthand for how long the audit period was. Type 1 means that the audit looked at the company at one point in time, while Type 2 means that the auditor actually monitored the company over a period of time usually 6 or 12 months.

\n

What engineers think about SOC2 Compliance

\n

To engineering organizations, compliance is often seen as a nuisance or a distraction from shipping code that moves the needle for actual security issues. Software engineers are those deepest in the weeds, so they have the code that they're familiar with at the top of mind when you ask where security concerns lie. Because I knew where the bodies were buried when I first transitioned my career to product management from engineering, I always tried to push back and shield my team from having to deliver compliance features. The team celebrated this as a win for focus, but we never got to fully understand the externalities of this approach.

\n

Fast forward a few years, I've now gotten much wider exposure to the rest of the business functions at a technology company. From the overarching view of an executive, the perspective of the software engineer seems quite amiss. If you asked an engineer what they're concerned about, it might be that they quickly used the defaults for bcrypt and didn't spend the time evaluating the ideal number of bcrypt rounds or alternative algorithms. This perspective is valuable, but can also be missing the forest for the trees; it's far easier to perform phishing attacks on a new hire than it is to reverse engineer the cryptography in their codebase. That simple fact makes it clear that if you haven't already addressed the foundational security processes at your business, it doesn't matter how secure the software you're building is.

\n

Compliance is ultimately about one thing: trust

\n

All of that said, AuthZed's engineering-heavy team is not innocent from this line of thinking, especially since our core product is engineering security infrastructure. However, if we put our egos aside, there is one thing that reigns supreme regardless of the product you're building: the trust you build with your customers.

\n

The compliance industry was never trying to hide that its end goal is purely trust in processes. SOC2 is defined by the American Institute of Certified Public Accountants and not a cybersecurity standards body; this is because compliance is about ensuring processes at your business and not finding remote code execution in your codebase. That doesn't mean that compliance cannot uncover deep code issues because SOC2 audits actually require you to perform an annual penetration test from an actual cybersecurity vendor. Coding vulnerabilities are only one aspect of the comprehensive approach that compliance is focused on.

\n

Without compliance, our industry would be stuck having to blindly trust that vendors are following acceptable security practices. By conforming to the processes required for certifications like SOC2, we can build trust with our partners and customers as well as prove the maturity of our products and business. While it may feel like toil at times, it's a necessary evil to ensure consistency across our supply chains.

\n

The final thought I'd like to leave you with is the idea that compliance isn't a checkbox to do business. It's a continuous process where you offer transparency to your customers to prove that they should trust you. I'm looking forward to seeing if my opinions change next renewal.

\n

I'd like to thank the teams at SecureFrame and Modern Assurance who we've collaborated with during this last audit as well as all of the vendors and data subprocessors we rely on to operate our business everyday.

", + "url": "https://authzed.com/blog/2024-soc2-reflection", + "title": "Our SOC2 Renewal and Reflections on Compliance", + "summary": "I'm happy to announce that AuthZed recently renewed our SOC2 compliance and our SOC2 Type 2 and SOC3 reports are now available on security.authzed.com.\nHaving just endured the audit process again, I figured it would be a good time to reflect on my personal feelings toward compliance and how my opinion has evolved.\n", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-01-07T20:20:00.000Z", + "date_published": "2025-01-07T20:20:00.000Z", + "author": { + "name": "Jimmy Zelinskie", + "url": "https://twitter.com/jimmyzelinskie" + } + }, + { + "id": "https://authzed.com/blog/the-dual-write-problem", + "content_html": "

Overview

\n

The dual-write problem presents itself in all distributed systems. A system that uses SpiceDB for authorization and also has an application database (read: most of them) is a distributed system. Working around the dual-write problem typically requires a non-trivial amount of work.

\n

What is the Dual-Write Problem?

\n

If you've heard this one before, feel free to skip down where we talk about solutions and approaches to the dual-write problem. If it's your first time, welcome!

\n

Let's consider a typical monolithic web application. Perhaps it's for managing and sharing files and folders, which makes it a natural candidate for a relation-based access control system like SpiceDB. The application has an upload endpoint that looks something like the following:

\n
def upload(req):\n  validate_request(req)\n  with new_transaction() as db:\n    db.write_file(req.file)\n  return Response(status=200)\n
\n

All of the access control logic is neatly contained within the application database, so no other work needed to happen up to this point. However, we want to start using SpiceDB in anticipation of the application growing more complex and services splitting off of our main monolith.

\n

We start with a simple schema:

\n
definition user {}\n\ndefinition folder {\n  relation viewer: user\n  permission view = viewer\n}\n\ndefinition file {\n  relation viewer: user\n  relation folder: folder\n  permission view = viewer + folder->viewer\n}\n
\n

Note that if a user is a viewer of the folder, they are able to view any file within the folder. That means that we'll need to keep SpiceDB updated with the relationships between files and folders, which is held in the folder relation on the file.

\n

\"Picture

\n

That doesn't sound so bad. Let's go and implement it:

\n
def upload(req):\n  validate_request(req)\n  with new_transaction() as db:\n    file_id = db.write_file(req.file)\n    write_folder_relationship(\n      file_id=file_id\n      folder_id=req.folder_id\n    )\n    \n  return Response(status=200)\n
\n

We've got a problem, though. What happens if the server crashes? We're going to use a server crash as an example problem because it's relatively conceptually simple and is also something that's hard to recover from. Let's mark up the function and then consider what happens if the server crashes at each point:

\n
def upload(req):\n  validate_request(req)\n  # point 1\n  with new_transaction() as db:\n    file_id = db.write_file(req.file)\n    # point 2\n    write_folder_relationship(\n      file_id=file_id\n      folder_id=req.folder_id\n    )\n    # point 3\n  # point 4 (outside of the transaction)\n  return Response(status=200)\n
\n

Note that the points refer to the boundaries between lines of code, rather than pointing at the line of code above or below them.\nHere's an alternative view of things in a sequence diagram:

\n

\"Application

\n

If the server crashes at points #1 or #4, we're fine - the request will fail, but we're still in a consistent state. The application server and SpiceDB agree about what the system should look like. If the server crashes at point #2, we're still okay - we've opened a database transaction but we haven't committed it, so the database will roll back the transaction and everything will be fine. If we crash at point #3, however, we're in a state where we've written to SpiceDB but we haven't committed the transaction to our database, and now SpiceDB and our database disagree about the state of the world.

\n

There isn't a neat way around this problem within the context of the process, either. This blog post goes further into potential approaches and their issues if you're curious. Things like adding a transactional semantic to SpiceDB or reordering the operations move the problem around but don't solve it, because there's still going to be some boundary in the code where the process could crash and leave you in an inconsistent state.

\n

Note as well that there's nothing particularly unique about the dual-write problem in systems using SpiceDB and an application database, either. If we were writing to two different application databases, or to an application database and to a cache, or to two different RPC-invoked services, we still have the same issue.

\n

So what can we do?

\n

We can solve the dual-write problem in SpiceDB using a few different approaches, each with varying levels of complexity, prerequisites, and tradeoffs to be made

\n

Do Nothing

\n

Doing nothing is an option that may be viable in the right context.\nThe sort of data inconsistency where SpiceDB and your application database disagree can be hard to diagnose.\nHowever, if there are mechanisms by which a user could recognize that something is wrong and remediate it in a timely manner, or if the authorized content in question isn't particularly sensitive, you may be able to run a naive implementation and avoid the complexity associated with other approaches.\nThe more stable your platform is, the more likely this is to cause fewer issues.

\n

Out-of-band consistency checking

\n

Out-of-band consistency checking would be one step beyond \"doing nothing.\"\nIf you have a source of truth that SpiceDB's state is meant to reflect in a given context, you can check that the two systems agree on a periodic basis.\nIf there's disagreement, the issues can be automatically remediated or flagged for manual intervention.

\n

This is a conceptually simple approach, but it's limited by both the size of your data and the velocity of changes to your data.\nThe more data you have, the more expensive and time-consuming the reconciliation process becomes.\nIf the data change rapidly, you could have false positives or false negatives when a change has been applied\nto one system but not the other.\nThis could theoretically be handled through locking or otherwise pinning SpiceDB and your application's database so that their data\nreflect the same version of the world while you're checking their associated states,\nbut that will greatly reduce your ability to make writes in your system.\nThe sync process itself can become a source of drift or inconsistency.

\n

Make SpiceDB the source of truth

\n

For certain kinds of relationships and data, it may be sufficient to make SpiceDB the source of truth for that particular information.\nThis works best for data that matches SpiceDB's storage and access model: binary presence or absence of a relationship between two objects, and no requirement to sort those relationships or filter by anything other than which subject or object they're associated with.

\n

If your data meet those conditions, you can remove the application database from the question and make a single write to SpiceDB and avoid the dual-write problem entirely.

\n

For example, if we wanted to add a notion of a file \"owner\" to our example application, we probably wouldn't need an owner column with a foreign key to a user ID in our application database.\nInstead, we could represent the relationship entirely with an owner relation in SpiceDB, such that an API handler for adding or updating an owner of a file or folder would only talk to SpiceDB and not to the application database.\nBecause only one system is being written to in the handler, we avoid the dual-write problem.

\n

\"SpiceDB

\n

The limitation here is that if you wanted to build a user interface where a user can see a table of all of the files they own, you wouldn't be able to filter, sort, or paginate\nthat table as easily, because SpiceDB isn't a general-purpose database and doesn't support that functionality in the same way.

\n

Event Sourcing/Command-Query Responsibility Segregation (CQRS)

\n

Event sourcing and CQRS are related ideas that involve treating your system as eventually consistent.\nRather than an API call being a procedure that runs to completion, an API call becomes an event that kicks off a chain of actions.\nThat event goes into an event stream, where consumers (to use Kafka's language) can pick them up and process them, which may involve producing new events.\nMultiple consumers can listen to the same topic.\nThe events flow through the system until they've all been processed, and the surrounding runtime ensures that nothing is dropped.

\n

There's a cute high-level illustration of how an event sourcing system works here: https://www.gentlydownthe.stream/

\n

In our example application, it might look like the following:

\n
    \n
  1. A client makes a request to create a file in a folder
  2. \n
  3. The API handler receives the request and puts a message into the event stream that includes the information about the file
  4. \n
  5. One consumer picks up the creation message and writes a relation to SpiceDB between the file and its folder
  6. \n
  7. Another consumer picks up the creation message and writes the information about the file to the application database
  8. \n
\n

The upside is that you're never particularly worried about the dual-write problem, because any individual failure of a subscriber can be recovered and re-run.\nEverything just percolates through until the system arrives at a new consistent state.

\n

The downside is that you can't treat API calls as RPCs.\nThe API call doesn't represent a change to the state of your system, but rather a command or request that will\neventually result in your desired changes happening.\nYou can work around this by having the client or UI listen to an event stream from the backend,\nsuch that all you're doing is passing messages back and forth, but this often requires\nsignificant rearchitecture, and not every runtime is amenable to this architecture.

\n

Here are some examples of event queues that you might see in an event sourcing system:

\n\n

Durable Execution Environments

\n

A durable execution environment is a set of software tools that let you pretend that you're writing relatively simple transactional logic within your application while abstracting over the concerns involved in writing to multiple services. They promise to take care of errors, rollbacks, and coordination, provided you've written the according logic into the framework.

\n

An upside is that you don't have to rearchitect your system if you aren't already using the paradigms necessary for event sourcing.\nThe code that you write with these systems tends to be familiar, procedural, and imperative, which lowers the barrier to entry\nfor a dev trying to solve a dual-write problem.

\n

A downside is that it can be difficult to know when your write has landed, because you're effectively dispatching it off to a job runner.\nThe business logic is moved off of the immediate request path. This means that the result of the business logic is also off of the request\npath, which raises a question of what you would return to an API client.

\n

Some durable execution environments are explicitly for running jobs and don't give you introspection into the results;\nothers can be inserted into your code in such a way that you can wait for the result and pretend that everything happened synchronously.\nNote that this means that the associated runtime that handles those jobs becomes a part of the request path, which can carry operational overhead.

\n

Temporal, Restate, Windmill, Trigger.dev, and Inngest are a few examples of durable execution environments. You'll have to evaluate which one best fits your architecture and infrastructure.

\n

Transactional Outbox Patterns

\n

A transactional outbox pattern is related to both Event Sourcing and Durable Execution, in that it works around the dual-write problem\nthrough eventual consistency.\nThe idea is that within your application database, when there's a change that needs to be written to SpiceDB, you write to an outbox table, which is an append-only log of modifications that should happen to SpiceDB.\nThat write can happen within the same database transaction, which means you don't have the dual write problem.\nYou then read that log (or subscribe to a changestream) with a separate process which marks the entries as it reads them and then submits them to SpiceDB through some other mechanism.

\n

As long as this process is effectively single-threaded and retries operations until they succeed (which is helped by SpiceDB allowing for idempotent writes with its TOUCH operation), you have worked around the dual-write problem.

\n

One of the most commonly-used tools in a system based on the transactional outbox pattern is Debezium.\nIt watches changes in an outbox table and submits them as events to Kafka, which can then be consumed downstream to write to another system.

\n

Some other resources are available here:

\n\n

So which one should I choose?

\n

Unfortunately, when making writes to multiple systems, there are no easy answers. SpiceDB isn't unique in this regard, and most systems of sufficient complexity will eventually run into some variant of this problem. Which solution you choose will depend on the shape of your existing system, the requirements of your domain, and the appetite of your organization to make the associated changes. We still think it's worth it - when you centralize the data required for authorization decisions, you get big wins in consistency, performance, and safety. It just takes a little work.

", + "url": "https://authzed.com/blog/the-dual-write-problem", + "title": "The Dual-Write Problem", + "summary": "The dual-write problem is present in any distributed system and is difficult to solve. We discuss where the problem arises and several approaches.", + "image": "https://authzed.com/images/blogs/blog-featured-image.png", + "date_modified": "2025-01-02T12:48:00.000Z", + "date_published": "2025-01-02T12:48:00.000Z", + "author": { + "name": "Tanner Stirrat", + "url": "https://www.linkedin.com/in/tannerstirrat/" + } + }, + { + "id": "https://authzed.com/blog/spicedb-amazon-ecs", + "content_html": "

Deploy SpiceDB to Amazon ECS

\n

Amazon Elastic Container Service (ECS) is a fully managed container orchestration service that simplifies your deployment, management, and scaling of containerized applications. This blog will illustrate how you can install SpiceDB on Amazon ECS and is divided into 3 parts:

\n
    \n
  1. A quickstart guide - Intended as a learning exercise
  2. \n
  3. A prod-friendly CloudFormation template
  4. \n
  5. Limitations of using Amazon ECS as a deployment target for SpiceDB
  6. \n
\n

It's important to note that this guide is meant for:

\n
    \n
  • You want to learn how to deploy SpiceDB with Amazon ECS
  • \n
  • Your current infrastructure is on Amazon ECS and you want to create a Proof of Concept with SpiceDB.
  • \n
\n

It is not recommended to use SpiceDB on ECS as a production deployment target. See the final section of this post for more details.

\n

Here are the prerequisites to follow this guide:

\n
    \n
  1. An Amazon Web Services (AWS) account with relevant permissions
  2. \n
  3. AWS CLI
  4. \n
  5. The zed CLI (this is optional if you’re writing permissions via code
  6. \n
  7. Docker installed on your system
  8. \n
\n

Quickstart

\n

Let’s start by pushing the SpiceDB Docker image to Amazon Elastic Container Registry (ECR)

\n

Push a SpiceDB Image to Amazon ECR

\n

Create an ECR Repository Using the AWS Console

\n
    \n
  1. Go to the ECR Console in the AWS Management Console.
  2. \n
  3. Click on Create repository.
  4. \n
  5. Enter a name for the repository, like spicedb, and configure any settings (like image scanning or encryption).
  6. \n
  7. Click Create repository to finish.
  8. \n
\n

\"Create

\n

Alternately, you can create this using the AWS CLI with the following command:

\n
aws ecr create-repository --repository-name spicedb --region <your-region>\n
\n

Authenticate Docker to Amazon ECR

\n

Amazon ECR requires Docker to authenticate before pushing images.\nRetrieve an authentication token and authenticate your Docker client to your registry using the following command (you’ll need to replace region with your specific AWS region, like us-east-1)

\n
aws ecr get-login-password --region <region> | docker login --username AWS --password-stdin <account-id>.dkr.ecr.<region>.amazonaws.com\n
\n

Tag the Docker Image

\n
    \n
  • Pull and build the SpiceDB image from Docker Hub using this command
  • \n
\n
docker pull authzed/spicedb:latest\ndocker build -t spicedb .\n
\n
    \n
  • After the build completes, tag your image so that you can push it to the ECR repository.
  • \n
\n
docker tag spicedb:latest <account-id>.dkr.ecr.<region>.amazonaws.com/spicedb:latest\n
\n

Note: If you are using an Apple ARM-based machine (Ex: Mac with Apple Silicon) and you eventually want to deploy it to a x86-based instance you need to build this image for multi-architecture using the buildx command.

\n

You cannot use docker buildx build with an image reference directly.\nInstead, create a lightweight Dockerfile to reference the existing image by adding this one line:

\n

FROM authzed/spicedb:latest

\n

and save it in the directory. While in that directory, build and push a Multi-Architecture Image using the buildx command:

\n
docker buildx build --platform linux/amd64,linux/arm64 -t <account-id>.dkr.ecr.<region>.amazonaws.com/spicedb:latest --push .\n
\n

Push the Image to ECR

\n
    \n
  • Once the image is tagged, push it to your newly-created ECR repository:
  • \n
\n
docker push <account-id>.dkr.ecr.<region>.amazonaws.com/spicedb:latest\n
\n

Replace account-id and region with your AWS account ID and region.

\n
    \n
  • Go to the Amazon ECR Console and navigate to the spicedb repository. Verify that the spicedb:latest image is available.
  • \n
\n

Note: All the above commands are pre-filled with your account details and can be seen by opening your repository on ECR and clicking the View push commands button

\n

\"View

\n

Run a SpiceDB task in an ECS Cluster

\n

Create an Amazon ECS Cluster

\n

Using AWS Console:

\n
    \n
  1. Go to the ECS console and click ‘Create cluster’
  2. \n
  3. Give it a name and namespace (optional)
  4. \n
  5. For this guide, we will use ‘AWS Fargate (serverless)’ as the infrastructure for our cluster
  6. \n
\n

Alternately, you can create this using the AWS CLI with this command:

\n
aws ecs create-cluster --cluster-name spicedb-cluster\n
\n

\"Create

\n

Create IAM Roles (if they don’t exist)

\n

If you don’t see these roles, you can create them as follows:

\n

Creating ecsTaskExecutionRole:

\n

The ECS Task Execution Role is needed for ECS to pull container images from ECR, write logs to CloudWatch, and access other AWS resources.

\n
    \n
  1. \n

    Go to the IAM Console.

    \n
  2. \n
  3. \n

    Click Create Role.

    \n
  4. \n
  5. \n

    For Trusted Entity Type, choose AWS Service.

    \n
  6. \n
  7. \n

    Select Elastic Container Service and then Elastic Container Service Task.

    \n
  8. \n
  9. \n

    Click Next and attach the following policies:

    \n
      \n
    • AmazonECSTaskExecutionRolePolicy
    • \n
    \n
  10. \n
\n

Or use these commands using AWS CLI:

\n
aws iam create-role --role-name ecsTaskExecutionRole \n\n--assume-role-policy-document '{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Principal\": {\"Service\": \"ecs-tasks.amazonaws.com\"}, \"Action\": \"sts:AssumeRole\"}]}'\n
\n

Attach the AmazonECSTaskExecutionRolePolicy to the role:

\n
aws iam attach-role-policy --role-name ecsTaskExecutionRole \n\n--policy-arn arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy\n
\n

Creating ecsTaskRole (Optional):

\n

The ECS Task Role is optional and should be created if your containers need access to other AWS services such as Amazon RDS or Secrets Manager.

\n
    \n
  1. Go to IAM Console and click Create Role.
  2. \n
  3. Choose Elastic Container Service and then select Elastic Container Service Task as the trusted entity.
  4. \n
  5. Attach any necessary policies (such as SecretsManagerReadWrite or other policies based on your application’s needs).
  6. \n
  7. Name the role ecsTaskRole and click Create role.
  8. \n
\n

Or use these commands using AWS CLI:

\n

Create the role using:

\n
aws iam create-role --role-name ecsTaskRole \n\n--assume-role-policy-document '{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Principal\": {\"Service\": \"ecs-tasks.amazonaws.com\"}, \"Action\": \"sts:AssumeRole\"}]}'\n
\n

Attach any policies based on the specific AWS services your application needs access to:

\n
aws iam attach-role-policy --role-name ecsTaskRole \n\n--policy-arn arn:aws:iam::<policy-arn-for-service-access>\n
\n

Define the ECS Task Definition

\n

The task definition defines how SpiceDB containers will be configured and run. Below is the JSON configuration for the task definition. To create a task definition:

\n
    \n
  • \n

    AWS Console

    \n
      \n
    • Look for Amazon ECS and then click on Task Definitions on the left
    • \n
    • Click Create new task definition -> Create new task definition with JSON
    • \n
    \n
  • \n
\n

Copy the JSON below:

\n
{\n  \"family\": \"spicedb-task\",\n  \"networkMode\": \"awsvpc\",\n  \"requiresCompatibilities\": [\"FARGATE\"],  \n  \"cpu\": \"512\",  \n  \"memory\": \"1024\",  \n  \"executionRoleArn\": \"arn:aws:iam::<account-id>:role/ecsTaskExecutionRole\", //Copy the ARN from the ecsTaskExecutionRole created above\n  \"taskRoleArn\": \"arn:aws:iam::<account-id>:role/ecsTaskRole\", //Copy the ARN from the ecsTaskRole created above\n  \"containerDefinitions\": [\n    {\n      \"name\": \"spicedb\",\n      \"image\": \"<account-id>.dkr.ecr.<region>.amazonaws.com/spicedb\",  //ECR Repository URI\n      \"essential\": true,\n      \"command\": [\n                \"serve\",\n                \"--grpc-preshared-key\",\n                \"somekey\"  \n            ],\n      \"portMappings\": [\n        {\n          \"containerPort\": 50051,\n          \"hostPort\": 50051,\n          \"protocol\": \"tcp\"\n        }\n      ],\n      \"environment\": [],\n      \"logConfiguration\": {\n        \"logDriver\": \"awslogs\",\n       \"options\": {\n                    \"awslogs-group\": \"/ecs/spicedb-ecs\",\n                    \"mode\": \"non-blocking\",\n                    \"awslogs-create-group\": \"true\",\n                    \"max-buffer-size\": \"25m\",\n                    \"awslogs-region\": \"us-east-1\",\n                    \"awslogs-stream-prefix\": \"ecs\"\n                }\n      }\n    }\n  ]\n}\n
\n

The command section specifies serve which is the primary command for running SpiceDB.\nThis command serves the gRPC and HTTP APIs by default along with a pre-shared key for authenticated requests.

\n

Note: This is purely for learning purposes so any permissions and relationships written to this instance of SpiceDB will be stored in-memory and not in a persistent database.\nTo write relationships to a persistent database, create a Amazon RDS instance for Postgres and note down the DB name, Master Password and Endpoint.

\n

You can add those into the task definition JSON in the command array like this:

\n
\"command\": [\n                \"serve\",\n                \"--grpc-preshared-key\",\n                \"somekey\",\n                \"--datastore-engine\",\n                \"postgres\",\n                \"--datastore-conn-uri\",\n                \"postgres://<username>:<password>@<RDS endpoint>:5432/<dbname>?sslmode=require\"\n            ],\n
\n

The defaults for username and dbname are usually postgres

\n

You can also use the AWS CLI by storing the above JSON in a file an then running this command

\n
aws ecs register-task-definition --cli-input-json file://spicedb-task-definition.json\n
\n

Run the task in a ECS Cluster

\n

Now that we’ve defined a task, we can create a task that would run within your ECS cluster.\nClick on your ECS Cluster created earlier

\n
    \n
  1. Click on the Tasks tab, and then Run new task
  2. \n
  3. Under Compute Configuration, click on Launch Type (since this is just a demo)
  4. \n
  5. Choose FARGATE as Launch Type and LATEST as Platform Version
  6. \n
  7. Under Deployment Configurat \ No newline at end of file diff --git a/scripts/buildSearchIndex.mts b/scripts/buildSearchIndex.mts new file mode 100644 index 0000000..045c4bb --- /dev/null +++ b/scripts/buildSearchIndex.mts @@ -0,0 +1,69 @@ +import * as pagefind from "pagefind"; + +type FeedItem = { + id: string, + content_html: string, + url: string, + title: string, + summary: string, + image: string, + date_modified: string, + date_published: string, + author: { + name: string, + url: string, + } +} + +type FeedResponse = { + items: FeedItem[], +} + +const main = async () => { + // Pull down the contents of the feed + const blogContents: FeedResponse = await fetch("https://authzed.com/feed/json").then(res => res.json()) + + // Create a Pagefind search index to work with + const { index } = await pagefind.createIndex({ verbose: true, logfile: "pagefind.log" }); + + if (!index) { + throw Error("could not create index") + } + + // Index all HTML files generated by this + await index.addDirectory({ + // NOTE: this is relative to the current dir, so + // when you run `pnpm gen:pagefind` you're going relative + // to the repository root. + path: "out", + }); + + // Add feed items to index + await Promise.all(blogContents.items.map(item => ( + index.addCustomRecord({ + url: item.url, + content: item.content_html, + language: "en", + meta: { + title: item.title, + image: item.image, + summary: item.summary, + }, + filters: { + // TODO: add more filters? + author: [item.author.name], + }, + sort: { + // TODO: make sure these work as expected + date: item.date_modified, + published: item.date_published, + } + }) + ))) + + await index.writeFiles({ + outputPath: "public/_pagefind" + }); +} + +main(); diff --git a/scripts/postbuild.sh b/scripts/postbuild.sh index da5c128..aaeabe4 100755 --- a/scripts/postbuild.sh +++ b/scripts/postbuild.sh @@ -5,5 +5,5 @@ set -e # Generate sitemap pnpm exec next-sitemap -# Update blog search data -curl -o public/feed.json https://authzed.com/feed/json +# Generate search +pnpm gen:pagefind diff --git a/tailwind.config.ts b/tailwind.config.ts deleted file mode 100644 index 5da2ea1..0000000 --- a/tailwind.config.ts +++ /dev/null @@ -1,51 +0,0 @@ -import type { Config } from "tailwindcss"; -import colors from "tailwindcss/colors"; - -export default { - // prefix: '', // NOTE: nextra-docs-theme uses 'nx-' as a prefix. Unprefixed classes will take precendence over nx- prefixed classes. - content: [ - "./pages/**/*.{js,ts,jsx,tsx,mdx}", - "./components/**/*.{js,ts,jsx,tsx}", - "./theme.config.tsx", - ], - theme: { - // Copied from nextra-docs-theme to match - screens: { - sm: "640px", - md: "768px", - lg: "1024px", - xl: "1280px", - "2xl": "1536px", - }, - // Copied from nextra-docs-theme to match - fontSize: { - xs: ".75rem", - sm: ".875rem", - base: "1rem", - lg: "1.125rem", - xl: "1.25rem", - "2xl": "1.5rem", - "3xl": "1.875rem", - "4xl": "2.25rem", - "5xl": "3rem", - "6xl": "4rem", - }, - // Copied from nextra-docs-theme to match - letterSpacing: { - tight: "-0.015em", - }, - extend: { - // All default color palettees are available - colors: { - dark: "#111", - transparent: "transparent", - current: "currentColor", - black: "#000", - white: "#fff", - primary: colors.neutral, - }, - }, - }, - plugins: [], - darkMode: ["class", 'html[class~="dark"]'], -} satisfies Config; diff --git a/theme.config.tsx b/theme.config.tsx deleted file mode 100644 index 992e1b8..0000000 --- a/theme.config.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import { useRouter } from "next/router"; -import { DocsThemeConfig, useConfig } from "nextra-theme-docs"; -import Banner from "./components/banner"; -import { NavCTA, TocCTA } from "./components/cta"; -import Footer from "./components/footer"; -import { Logo } from "./components/logo"; -import { Flexsearch } from "./components/nextra/Flexsearch"; - -const config: DocsThemeConfig = { - logo: Logo, - logoLink: "https://authzed.com", - project: { link: "https://github.com/authzed/spicedb" }, - head: () => { - const { asPath } = useRouter(); - const { title: titleContent, frontMatter } = useConfig(); - const desc = - frontMatter.description || - "Welcome to the SpiceDB and AuthZed docs site."; - const resolvedTitle = titleContent - ? `${titleContent} - Authzed Docs` - : "Authzed Docs"; - return ( - <> - {resolvedTitle} - - - - - - ); - }, - darkMode: true, - color: { - hue: { dark: 45, light: 290 }, - saturation: { dark: 100, light: 100 }, - }, - chat: { link: "https://authzed.com/discord" }, - docsRepositoryBase: "https://github.com/authzed/docs/blob/main", - banner: { - dismissible: false, - content: , - }, - navbar: { - extraContent: , - }, - sidebar: { - toggleButton: true, - defaultMenuCollapseLevel: 1, - }, - feedback: { - content: ( - - Something unclear? -
    - Create an issue → -
    - ), - }, - toc: { backToTop: true, extraContent: }, - footer: { - component: