diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5f0889c..955fb44 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ version: 2 updates: - - package-ecosystem: "npm" # See documentation for possible values - directory: "/" # Location of package manifests - schedule: - interval: "weekly" + - package-ecosystem: "npm" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 44ee5b2..4a89177 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,4 +1,4 @@ -name: Build and run yarn project +name: Lint, Build, Smoketest Run on: push: @@ -8,10 +8,15 @@ on: branches: - "*" +# Auto-abort duplicate jobs (e.g. job being triggered again on push to open PR) +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: - build-and-run: + lint: + name: Lint runs-on: ubuntu-latest - steps: - name: Checkout repository uses: actions/checkout@v4 @@ -25,8 +30,34 @@ jobs: - name: Install dependencies run: yarn install --frozen-lockfile - - name: Prettier check - run: yarn add --dev --exact prettier && yarn exec prettier . --write + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: "3.13" + + - name: Install pre-commit + run: | + python -m pip install --upgrade pip + pip install pre-commit + + - name: Run pre-commit + run: pre-commit run --all-files + + build-and-run: + name: Build and Run + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: "22" + cache: "yarn" + + - name: Install dependencies + run: yarn install --frozen-lockfile - name: Build project run: yarn build diff --git a/.gitignore b/.gitignore index ab6ee90..c38e89a 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,4 @@ npm-debug.log* yarn-debug.log* yarn-error.log* -*:Zone.Identifier \ No newline at end of file +*:Zone.Identifier diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..c5c25f7 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,26 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + + - repo: https://github.com/igorshubovych/markdownlint-cli + rev: v0.45.0 + hooks: + - id: markdownlint + args: [--config=.markdownlint.json] + + - repo: local + hooks: + - id: prettier + name: prettier + entry: yarn prettier + language: node + types_or: [markdown, mdx] + pass_filenames: true + args: [--write] + additional_dependencies: + - prettier@3.1.0 diff --git a/.prettierrc b/.prettierrc index abd27b9..d146339 100644 --- a/.prettierrc +++ b/.prettierrc @@ -4,5 +4,7 @@ "useTabs": false, "semi": false, "singleQuote": false, - "endOfLine": "lf" -} \ No newline at end of file + "endOfLine": "lf", + "printWidth": 100, + "proseWrap": "always" +} diff --git a/.vscode/settings.json b/.vscode/settings.json index e6e81a4..239f3c3 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,12 @@ { "languageToolLinter.languageTool.ignoredWordsInWorkspace": [ + "hkdf", + "hpke", + "megolm", "polyproto-auth", "polyproto-chat", + "polyproto-core", "polyproto-mls" - ] + ], + "languageToolLinter.languageTool.disabledRules": "UPPERCASE_SENTENCE_START" } diff --git 
a/README.md b/README.md index 0c6c2c2..a9a66f4 100644 --- a/README.md +++ b/README.md @@ -1,41 +1,67 @@ +[![FAQ-shield]][FAQ] + # Website This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. -### Installation +## Installation -``` -$ yarn +```bash +yarn ``` -### Local Development +### Pre-commit Hooks -``` -$ yarn start -``` +This project uses pre-commit hooks to ensure code quality and uniform formatting. To set up +pre-commit hooks: -This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. +1. Install pre-commit: -### Build + ```bash + pip install pre-commit + ``` -``` -$ yarn build -``` +2. Install prettier and dev-dependencies: -This command generates static content into the `build` directory and can be served using any static contents hosting service. + ```bash + yarn install + ``` -### Deployment +3. Install the git hooks: -Using SSH: + ```bash + pre-commit install + ``` -``` -$ USE_SSH=true yarn deploy +The pre-commit hooks will automatically: + +- Format Markdown and MDX files using Prettier +- Lint Markdown files using markdownlint +- Check for common issues like trailing whitespace and file endings + +You can manually run the checks on all files: + +```bash +pre-commit run --all-files ``` -Not using SSH: +### Local Development +```bash +yarn start ``` -$ GIT_USER= yarn deploy + +This command starts a local development server and opens up a browser window. Most changes are +reflected live without having to restart the server. + +### Build + +```bash +yarn build ``` -If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. +This command generates static content into the `build` directory and can be served using any static +contents hosting service. + +[FAQ-shield]: https://img.shields.io/badge/Frequently_Asked_Questions_(FAQ)-ff62bd +[FAQ]: https://github.com/polyphony-chat/.github/blob/main/FAQ.md diff --git a/blog/2023-08-17-self-updating-structs.md b/blog/2023-08-17-self-updating-structs.md index 6f4a1a8..1710104 100644 --- a/blog/2023-08-17-self-updating-structs.md +++ b/blog/2023-08-17-self-updating-structs.md @@ -2,41 +2,63 @@ draft: false date: 2023-08-17 categories: - - chorus - - updates + - chorus + - updates authors: - - bitfl0wer + - bitfl0wer --- -# Self-updating structs, moving blog posts to GitHub, and more! +# Self-updating structs, moving blog posts to GitHub, and more -Introducing self-updating structs, explaining how they work, and what they are good for. Also, moving blog posts to GitHub, and other improvements. +Introducing self-updating structs, explaining how they work, and what they are good for. Also, +moving blog posts to GitHub, and other improvements. -{/* truncate */} +{/_ truncate _/} -It has been a while since the last update post - 1 month to be precise! I haven't gotten around to writing one of these, mostly because of personal time- and energy constraints. However, now that these resources are finally replenishing again, I figured that it is once again time! +It has been a while since the last update post - 1 month to be precise! I haven't gotten around to +writing one of these, mostly because of personal time- and energy constraints. However, now that +these resources are finally replenishing again, I figured that it is once again time! ## Moving Blog Posts to GitHub -This is a pretty self-explanatory point. 
I thought, that opencollective would find more use by me and other polyphony-curious folk, however, this didn't go as planned. Also, opencollective made their Discord embeds really poopy, which is why I am moving all the blog posts over to GitHub. +This is a pretty self-explanatory point. I thought, that opencollective would find more use by me +and other polyphony-curious folk, however, this didn't go as planned. Also, opencollective made +their Discord embeds really poopy, which is why I am moving all the blog posts over to GitHub. ## A big one: Self-updating structs -Ideally, you want entities like Channels, Guilds, or Users to react to Gateway events. A Gateway event is basically a message from Spacebar/Discord to you, which says: "Hey, User `x` has changed their name to `y`!". If you can reflect those changes immediately within your code, you save yourself from having to make a lot of requests and potentially getting rate-limited. +Ideally, you want entities like Channels, Guilds, or Users to react to Gateway events. A Gateway +event is basically a message from Spacebar/Discord to you, which says: "Hey, User `x` has changed +their name to `y`!". If you can reflect those changes immediately within your code, you save +yourself from having to make a lot of requests and potentially getting rate-limited. -This is exactly what Self-updating structs set out to solve. The first implementation was done by @SpecificProtagonist and me (thank you a lot again, btw) on the 21st of July. However: This implementation, being in its' infancy, has had some design flaws, which to me made pretty clear, that this whole thing needed to be thought through a little better. +This is exactly what Self-updating structs set out to solve. The first implementation was done by +@SpecificProtagonist and me (thank you a lot again, btw) on the 21st of July. However: This +implementation, being in its' infancy, has had some design flaws, which to me made pretty clear, +that this whole thing needed to be thought through a little better. -The second iteration of these Self-updating structs was finished... today, actually, by me. It saves memory compared to the first iteration by storing unique objects only once, instead of `n = how many times they are being referenced`-times. While this way of doing things is really efficient, it also has been a pain in the ass to make, which is precisely the reason why this took me so long. I've learned a lot along the way though. +The second iteration of these Self-updating structs was finished... today, actually, by me. It saves +memory compared to the first iteration by storing unique objects only once, instead of +`n = how many times they are being referenced`-times. While this way of doing things is really +efficient, it also has been a pain in the ass to make, which is precisely the reason why this took +me so long. I've learned a lot along the way though. -The public API has also gotten a *lot* better in "v2". This is mostly because I am a big believer in writing tests for your code, and through writing what are essentialy real-world-simulation-examples, I noticed how repetitive or stupid some things were, and thus could improve upon them. +The public API has also gotten a _lot_ better in "v2". This is mostly because I am a big believer in +writing tests for your code, and through writing what are essentialy real-world-simulation-examples, +I noticed how repetitive or stupid some things were, and thus could improve upon them. -Having this whole thing finished is a big relief. 
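To make the idea a bit more concrete, here is a rough sketch of the pattern in plain Rust: shared, reference-counted handles that a gateway event handler can update in place. The types and method names here are made up for illustration and are not chorus' actual API.

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

/// Hypothetical stand-in for an entity like a User; not chorus' real type.
#[derive(Debug)]
struct User {
    id: u64,
    name: String,
}

/// A shared, updatable handle to an entity.
type Shared<T> = Arc<RwLock<T>>;

/// Stores every unique object exactly once, keyed by id, and hands out handles
/// instead of copies.
#[derive(Default)]
struct Cache {
    users: HashMap<u64, Shared<User>>,
}

impl Cache {
    /// Returns the existing handle for `id`, or creates one.
    fn user(&mut self, id: u64, name: &str) -> Shared<User> {
        self.users
            .entry(id)
            .or_insert_with(|| Arc::new(RwLock::new(User { id, name: name.to_owned() })))
            .clone()
    }

    /// Applies a (simplified) gateway event: "user `id` changed their name to `new_name`".
    fn apply_user_update(&self, id: u64, new_name: &str) {
        if let Some(user) = self.users.get(&id) {
            user.write().unwrap().name = new_name.to_owned();
        }
    }
}

fn main() {
    let mut cache = Cache::default();
    let handle = cache.user(1, "x");

    // A gateway event arrives; no extra HTTP request, no risk of a rate limit.
    cache.apply_user_update(1, "y");

    // Every holder of the handle observes the change.
    assert_eq!(handle.read().unwrap().name, "y");
    println!("{:?}", *handle.read().unwrap());
}
```

Because each unique object lives in the cache exactly once, ten places referencing the same user all share one allocation, which is where the memory savings over the first iteration come from.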
This self-updating thing is an essential feature for any Discord/Spacebar compatible library, and I think that we implemented it very nicely. +Having this whole thing finished is a big relief. This self-updating thing is an essential feature +for any Discord/Spacebar compatible library, and I think that we implemented it very nicely. ## Documentation and other improvements -@kozabrada123 took it upon himself to re-write a lot of the codes' Documentation. Thanks for that! This will massively improve the ease of use of this library - both when developing *for* and *with* it. koza also improved our CI/CT pipeline by incorporating build-caching into it, which speeds up builds. +@kozabrada123 took it upon himself to re-write a lot of the codes' Documentation. Thanks for that! +This will massively improve the ease of use of this library - both when developing _for_ and _with_ +it. koza also improved our CI/CT pipeline by incorporating build-caching into it, which speeds up +builds. This has been the last month of Polyphony. In the coming weeks, I will be working on + - Implementing self-updating-struct behavior for every struct which needs it - Fixing bugs - Adding more features, like emojis, 2FA, Guild Settings, etc.! diff --git a/blog/2023-08-29-chorus-alpha.md b/blog/2023-08-29-chorus-alpha.md index b32c692..6757e92 100644 --- a/blog/2023-08-29-chorus-alpha.md +++ b/blog/2023-08-29-chorus-alpha.md @@ -2,29 +2,40 @@ draft: false date: 2023-08-29 categories: - - chorus - - updates + - chorus + - updates authors: - - bitfl0wer + - bitfl0wer --- # chorus Alpha 0.1.0 -We are alpha now! As of 2 days ago, the first Alpha of Chorus, Version 0.1.0, has been released for everyone to look at and use on crates.io! +We are alpha now! As of 2 days ago, the first Alpha of Chorus, Version 0.1.0, has been released for +everyone to look at and use on crates.io! -{/* truncate */} +{/_ truncate _/} So, is the library complete now? No. And yes! It's, well, complicated... Let me explain! -Chorus is at a point where I can comfortably say that, if you take voice-support out of the calculation for a bit, the foundation feels rock-solid, easy to work with and easily expandable. However, to stay with our house/building metaphor for a bit, the walls aren't painted yet, there's barely any furniture and not all of the electrical outlets have been installed yet. +Chorus is at a point where I can comfortably say that, if you take voice-support out of the +calculation for a bit, the foundation feels rock-solid, easy to work with and easily expandable. +However, to stay with our house/building metaphor for a bit, the walls aren't painted yet, there's +barely any furniture and not all of the electrical outlets have been installed yet. -Okay, enough with this bad metaphor; What I meant to convey is, that a lot of the API endpoints have not yet been implemented, and there are at least a few points we haven't addressed yet - like Gateway Error Handling, to name an example. +Okay, enough with this bad metaphor; What I meant to convey is, that a lot of the API endpoints have +not yet been implemented, and there are at least a few points we haven't addressed yet - like +Gateway Error Handling, to name an example. -But for an early Alpha, this, in my opinion, is absolutely acceptable. Implementing API endpoints is something that probably someone who is entirely new to Rust could do, given that we've streamlined the procedure so much, and the other stuff can comfortably be fixed without having to do any major changes to the internals. 
+But for an early Alpha, this, in my opinion, is absolutely acceptable. Implementing API endpoints is +something that probably someone who is entirely new to Rust could do, given that we've streamlined +the procedure so much, and the other stuff can comfortably be fixed without having to do any major +changes to the internals. -I, for one, am currently experimenting around with the Polyphony Client, which, by the way, will likely be written with Iced as a GUI Framework, not GTK. I have no prior experience in GUI/Desktop Application development, but I am feeling more confident than ever and I'm eager to learn all there is to know about these topics. +I, for one, am currently experimenting around with the Polyphony Client, which, by the way, will +likely be written with Iced as a GUI Framework, not GTK. I have no prior experience in GUI/Desktop +Application development, but I am feeling more confident than ever and I'm eager to learn all there +is to know about these topics. -That's that! Seeya next time. -Cheers, +That's that! Seeya next time. Cheers, Flori diff --git a/blog/2023-09-02-client-getting-started.md b/blog/2023-09-02-client-getting-started.md index 9b76de3..ab75a33 100644 --- a/blog/2023-09-02-client-getting-started.md +++ b/blog/2023-09-02-client-getting-started.md @@ -2,26 +2,39 @@ draft: false date: 2023-09-02 categories: - - polyphony - - updates + - polyphony + - updates authors: - - bitfl0wer + - bitfl0wer --- # Getting started with the Polyphony Client -{/* truncate */} +{/_ truncate _/} -Us labeling Chorus to be in a public-alpha state was really great news for me, for a lot of reasons! It marked a point in Polyphonys history where, after all these months of work, we agreed upon the fact that what we *have* is good enough to be shown to the public, and that's always a nice thing when investing so much of your free-time into a project. -The other main reason why this is such a great thing is, because this alpha state (at least to me) means, that the public API is kind-of stable, or at least stable enough so that I, the project lead, can rely upon the fact that all the public methods will not, in fact, be replaced in 4 days. +Us labeling Chorus to be in a public-alpha state was really great news for me, for a lot of reasons! +It marked a point in Polyphonys history where, after all these months of work, we agreed upon the +fact that what we _have_ is good enough to be shown to the public, and that's always a nice thing +when investing so much of your free-time into a project. The other main reason why this is such a +great thing is, because this alpha state (at least to me) means, that the public API is kind-of +stable, or at least stable enough so that I, the project lead, can rely upon the fact that all the +public methods will not, in fact, be replaced in 4 days. -This means, that I can finally start working on the Client! And I have done that! For the past 2? 3? Days, I've been tinkering around with Iced-rs (a really, really great UI framework for Rust, written in Rust) and the client repository to create the 'skeleton' of the application. While this is definitely not trivial, especially since I have *no* prior experience in desktop application development, it's also not too hard either. +This means, that I can finally start working on the Client! And I have done that! For the past 2? 3? +Days, I've been tinkering around with Iced-rs (a really, really great UI framework for Rust, written +in Rust) and the client repository to create the 'skeleton' of the application. 
While this is +definitely not trivial, especially since I have _no_ prior experience in desktop application +development, it's also not too hard either. -While Iced is not mature yet, and "how-to" guides, as well as the promised Iced-book, are still largely missing, the maintainers have done a great job with providing a LOT of code examples and solid rustdocs. It's a fun library/framework to work with, and the Elm-inspired approach of dividing up State, Messages, View- and Update-Logic feels really intuitive and seems to make sure that your Application will never end up in an unexpected state. +While Iced is not mature yet, and "how-to" guides, as well as the promised Iced-book, are still +largely missing, the maintainers have done a great job with providing a LOT of code examples and +solid rustdocs. It's a fun library/framework to work with, and the Elm-inspired approach of dividing +up State, Messages, View- and Update-Logic feels really intuitive and seems to make sure that your +Application will never end up in an unexpected state. -That's all I have for today. Thanks for reading this! Here's a video of multi-user login already working ^^ +That's all I have for today. Thanks for reading this! Here's a video of multi-user login already +working ^^ - diff --git a/blog/2023-11-23-webassembly-chorus.md b/blog/2023-11-23-webassembly-chorus.md index 8aa4188..c3e5ce0 100644 --- a/blog/2023-11-23-webassembly-chorus.md +++ b/blog/2023-11-23-webassembly-chorus.md @@ -2,35 +2,52 @@ draft: false date: 2023-11-23 categories: - - chorus - - polyphony - - updates + - chorus + - polyphony + - updates authors: - - bitfl0wer + - bitfl0wer --- # Porting chorus to WebAssembly + Client Update -What the current state of GUI libraries in Rust means for Polyphony and chorus, and why we are porting chorus to WebAssembly. +What the current state of GUI libraries in Rust means for Polyphony and chorus, and why we are +porting chorus to WebAssembly. -{/* truncate */} +{/_ truncate _/} Hi all! -To make this part of the post short: The web-based client will be worked on *before* the native one, if there even ever will be one. The reason is that no currently available native Rust GUI library meets the standards I'd like to see when using it to build an application I am putting my name behind. I'd like to have +To make this part of the post short: The web-based client will be worked on _before_ the native one, +if there even ever will be one. The reason is that no currently available native Rust GUI library +meets the standards I'd like to see when using it to build an application I am putting my name +behind. I'd like to have + - accessibility - great styling - cross compilation - memory safety -and the current state of Rust GUIs essentially tells me to "pick three", which is unacceptable to me. A WebAssembly based application is the best we'll get for now, and I am fine with that. +and the current state of Rust GUIs essentially tells me to "pick three", which is unacceptable to +me. A WebAssembly based application is the best we'll get for now, and I am fine with that. -Compiling to WebAssembly isn't all that easy though: The `wasm32-unknown-unknown` target intentionally makes no assumptions about the environment it is deployed in, and therefore does not provide things like a `net` or `filesystem` implementation (amongst other things). 
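For illustration, coping with such a target usually means compile-time gating on the target architecture, so native and browser builds get different implementations. This is a simplified sketch of that technique, not chorus' actual code:

```rust
// Simplified sketch of target-gating for `wasm32-unknown-unknown`.
// The target ships no `net` or filesystem primitives, so platform-specific
// implementations are selected at compile time.

/// `std::thread::sleep` is not usable on `wasm32-unknown-unknown`, so the wasm
/// build gets a different implementation.
#[cfg(not(target_arch = "wasm32"))]
fn sleep_ms(ms: u64) {
    std::thread::sleep(std::time::Duration::from_millis(ms));
}

#[cfg(target_arch = "wasm32")]
fn sleep_ms(_ms: u64) {
    // In a real build this would delegate to a JS timer or a wasm-aware async
    // runtime; stubbed out here to keep the sketch small.
}

fn main() {
    sleep_ms(10);
    println!("compiled for: {}", std::env::consts::ARCH);
}
```

The same kind of split applies to the bigger pieces, like HTTP and WebSocket transports: a native build can open sockets directly, while a `wasm32-unknown-unknown` build has to go through whatever the surrounding JavaScript environment provides.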
Luckily, adding support for this compilation target only took me a full 40h work week [:)], and we are now the first Rust Discord-API library (that I know of) to support this target. +Compiling to WebAssembly isn't all that easy though: The `wasm32-unknown-unknown` target +intentionally makes no assumptions about the environment it is deployed in, and therefore does not +provide things like a `net` or `filesystem` implementation (amongst other things). Luckily, adding +support for this compilation target only took me a full 40h work week [:)], and we are now the first +Rust Discord-API library (that I know of) to support this target. -You might not have yet heard much about WebAssembly: In the past, web developers could only really use three languages - HTML, CSS, and JavaScript - to write code that browsers could understand directly. With WebAssembly, developers can write code in many other languages, then use WASM to convert it into a form the browser can run. +You might not have yet heard much about WebAssembly: In the past, web developers could only really +use three languages - HTML, CSS, and JavaScript - to write code that browsers could understand +directly. With WebAssembly, developers can write code in many other languages, then use WASM to +convert it into a form the browser can run. -This is particularly helpful for programs that require a lot of computing power, like video games or design software. Before, running such programs in a browser would be slow or impossible. WebAssembly can make these run smoothly, right in your web browser. +This is particularly helpful for programs that require a lot of computing power, like video games or +design software. Before, running such programs in a browser would be slow or impossible. WebAssembly +can make these run smoothly, right in your web browser. -Overall, WebAssembly is expanding the kinds of applications that can be run on the web, making the web a more flexible and powerful place to work and play. Compiling Chorus for WASM allows us to leverage this fairly new technology and bring all of Rusts benefits into a web context. +Overall, WebAssembly is expanding the kinds of applications that can be run on the web, making the +web a more flexible and powerful place to work and play. Compiling Chorus for WASM allows us to +leverage this fairly new technology and bring all of Rusts benefits into a web context. The next blog post will likely be about progress with the web-based client. See ya until then! :) diff --git a/blog/2024-02-07-account-migration.md b/blog/2024-02-07-account-migration.md index f4e1d8f..622f2e9 100644 --- a/blog/2024-02-07-account-migration.md +++ b/blog/2024-02-07-account-migration.md @@ -2,45 +2,45 @@ draft: false date: 2024-02-07 categories: - - polyproto + - polyproto authors: - - bitfl0wer + - bitfl0wer title: Account migration in polyproto --- -Account migration is an important and difficult thing to get right in federated systems. In this blog -post, I will outline how I imagine account migration to work in polyproto, and what benefits this -approach brings. +Account migration is an important and difficult thing to get right in federated systems. In this +blog post, I will outline how I imagine account migration to work in polyproto, and what benefits +this approach brings. 
-{/* truncate */} +{/_ truncate _/} # Account migration in polyproto -It seems that striking a good balance between user experience, convenience and privacy -has been a difficult task for many federated systems, when it comes to account migration. -polyprotos' approach to how data is distributed and stored, and how identities are managed, makes it -possible to have a very smooth and secure account migration process. +It seems that striking a good balance between user experience, convenience and privacy has been a +difficult task for many federated systems, when it comes to account migration. polyprotos' approach +to how data is distributed and stored, and how identities are managed, makes it possible to have a +very smooth and secure account migration process. ## The problem -Using Mastodon as an example; -When a user wants to move from one instance to another, they have to -create a new account on the new instance, and follow all the people they were following on the -old account. All the toots and other data from the old account are left behind, and you do not have a +Using Mastodon as an example; When a user wants to move from one instance to another, they have to +create a new account on the new instance, and follow all the people they were following on the old +account. All the toots and other data from the old account are left behind, and you do not have a way of porting them over to the new account. This is a problem that has been around for a long time, and it is not just a problem with Mastodon, but with many other federated systems as well. ## How polyproto works, briefly In polyproto, your federation ID, e.g. `xenia@example.com`, is what identifies you. If you want to -use this identity on a client, your client will generate a key pair for a certificate signing request, -and send this request to your home server. Given that you didn't provide any invalid data, your home -server will sign the certificate, and send it back to you. +use this identity on a client, your client will generate a key pair for a certificate signing +request, and send this request to your home server. Given that you didn't provide any invalid data, +your home server will sign the certificate, and send it back to you. -Any data you send to anyone - be it a chat message, a social media post, or anything else - is signed -using your private key. This signature can be verified by anyone using your public key, which is part -of the certificate you received from your home server. To check a certificates' validity, you can -ask the home server for its root certificate, and verify the signature on the certificate you received. +Any data you send to anyone - be it a chat message, a social media post, or anything else - is +signed using your private key. This signature can be verified by anyone using your public key, which +is part of the certificate you received from your home server. To check a certificates' validity, +you can ask the home server for its root certificate, and verify the signature on the certificate +you received. This means: @@ -55,9 +55,9 @@ This is even true when you are sending data to a different server than your home ### Low data centralization -Fundamentally, the process of migrating an account in polyproto relies mostly on changing data ownership, -rather than moving data around. This works best in scenarios where data is highly distributed, and -not stored in a central location. 
+Fundamentally, the process of migrating an account in polyproto relies mostly on changing data +ownership, rather than moving data around. This works best in scenarios where data is highly +distributed, and not stored in a central location. :::tip[Example] @@ -72,12 +72,12 @@ When you want to move your account from one server to another, you: 2. Then, you configure the new account to back-reference the old account 3. Next, if you are able to, you tell your old home server about the move 4. Last but not least, you verify to the servers storing your data that you are the same person as - the one who created the old account. The servers then update the data ownership to your new account. - This is done by using your old private key(s), in a way that does not reveal your private key(s) to - anyone else. + the one who created the old account. The servers then update the data ownership to your new + account. This is done by using your old private key(s), in a way that does not reveal your + private key(s) to anyone else. -If applicable, your friends and followers will also be notified about the move, keeping -existing relationships intact. +If applicable, your friends and followers will also be notified about the move, keeping existing +relationships intact. :::note @@ -97,17 +97,17 @@ Should data actually need to be moved, for example when the old server is going if the centralization of data is higher, the migration process is extended by a few steps: 1. Using the old account, your client requests a data export from your old home server. -2. The old home server sends you a data export. Your client will check the signatures on the exported - data, to make sure that the data was not tampered with. +2. The old home server sends you a data export. Your client will check the signatures on the + exported data, to make sure that the data was not tampered with. 3. You then import the data into your new account on the new home server. 4. ## Conclusion polyproto's approach to account migration is very user-friendly, and does not require the user to do -anything that is not already part of the normal usage of the system. The process is also very secure, -as it relies on the cryptographic properties of X.509 certificates, and also works across a highly -distributed data model, which, in my opinion, is how the internet *should* be. +anything that is not already part of the normal usage of the system. The process is also very +secure, as it relies on the cryptographic properties of X.509 certificates, and also works across a +highly distributed data model, which, in my opinion, is how the internet _should_ be. The biggest drawback to this approach is that there are a whole lot of web requests involved. Depending on the amount of data, this can take some minutes or possibly even hours. @@ -118,7 +118,7 @@ long time, and are widely used in many different applications. This means that t well understood, and that there are already many great tools in all sorts of programming languages available to work with it. From my point of view, there is no need to reinvent the wheel. -I hope that this article has given you a good understanding of how account migration works in polyproto. -If you have any questions or feedback, feel free to reach out to me via E-Mail, where I can -be reached under `flori@polyphony.chat`. OpenPGP is supported, and my public key can be found on +I hope that this article has given you a good understanding of how account migration works in +polyproto. 
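Before signing off, here is a purely conceptual sketch of the ownership hand-over described in step 4 of the migration steps above. These are stand-in types for illustration, not polyproto's actual data model or wire format:

```rust
use std::collections::HashMap;

/// A claim, signed with the *old* account's private key, stating that
/// `new_actor` is the same person as `old_actor`.
struct MigrationClaim {
    old_actor: String,  // e.g. "xenia@old.example.com"
    new_actor: String,  // e.g. "xenia@new.example.com"
    signature: Vec<u8>, // signature over (old_actor, new_actor)
}

/// Stand-in for "verify this signature against the public key in the old
/// actor's ID-Cert". A real implementation would use the certificate's actual
/// signature algorithm here.
fn signature_is_valid(_claim: &MigrationClaim, _old_public_key: &[u8]) -> bool {
    true // placeholder
}

/// What a server storing the old actor's data might do with such a claim:
/// check the signature, then re-point ownership. The data itself never moves.
fn reassign_ownership(
    claim: &MigrationClaim,
    old_public_key: &[u8],
    owners: &mut HashMap<u64, String>, // message id -> owning actor
) -> Result<(), &'static str> {
    if !signature_is_valid(claim, old_public_key) {
        return Err("invalid migration claim");
    }
    for owner in owners.values_mut() {
        if *owner == claim.old_actor {
            *owner = claim.new_actor.clone();
        }
    }
    Ok(())
}

fn main() {
    let claim = MigrationClaim {
        old_actor: "xenia@old.example.com".into(),
        new_actor: "xenia@new.example.com".into(),
        signature: vec![], // produced by the *old* private key in a real system
    };
    let mut owners = HashMap::from([(1u64, "xenia@old.example.com".to_string())]);
    reassign_ownership(&claim, b"old public key", &mut owners).unwrap();
    assert_eq!(owners[&1], "xenia@new.example.com");
}
```

The important property is that only a signature made with the old key changes hands; the old private key itself never leaves your client.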
If you have any questions or feedback, feel free to reach out to me via E-Mail, where I +can be reached under `flori@polyphony.chat`. OpenPGP is supported, and my public key can be found on [keys.openpgp.org (click to download pubkey)](https://keys.openpgp.org/vks/v1/by-fingerprint/1AFF5E2D2145C795AB117C2ADCAE4B6877C6FC4E) diff --git a/blog/2024-02-19-x509-in-polyproto.md b/blog/2024-02-19-x509-in-polyproto.md index 34cafa5..9f5b127 100644 --- a/blog/2024-02-19-x509-in-polyproto.md +++ b/blog/2024-02-19-x509-in-polyproto.md @@ -1,20 +1,20 @@ --- date: 2024-02-19 categories: - - polyproto - - X.509 + - polyproto + - X.509 authors: - - bitfl0wer + - bitfl0wer title: X.509 in polyproto --- # Certificates, please: X.509 in polyproto -This blog post covers a bit about how and why X.509 is used in polyproto, and how we try to make -the process of implementing your own server and incorporating it into an existing network a little +This blog post covers a bit about how and why X.509 is used in polyproto, and how we try to make the +process of implementing your own server and incorporating it into an existing network a little easier. -{/* truncate */} +{/_ truncate _/} :::quote "Authors' note" @@ -32,31 +32,30 @@ federate your identity across a whole network of decentralized services. Specifically, polyproto leverages the already well-documented and widely used X.509 standard at its core. X.509 was chosen over `OpenPGP` because of its comparative simplicity. The Web of Trust from `OpenPGP` often requires active user input to assign trust levels to users and their keys, which is -not inline with our ideas and goals for user experience in a decentralized system. -Ideally, decentralization and federation is as seamless as possible for the end-user, -and X.509 with its Certificate Authority (CA for short) model is the better fit for such a goal. -In fact, X.509 can be *so* seamless to the end-user, that you have probably forgotten that you are -already using it right now! - -HTTPS (SSL/TLS) certificates are likely the most popular form of digital certificate out there, -and they’re implemented in a way, where the only time us humans ever have to think about them, -is when our browser tells us that a certificate from a website we’re trying to visit, is not -valid anymore. - -This popularity is great news for polyproto, because it means that mature tooling for all sorts -of programming languages exists *today*, along with tutorials and documentation, teaching potential +not inline with our ideas and goals for user experience in a decentralized system. Ideally, +decentralization and federation is as seamless as possible for the end-user, and X.509 with its +Certificate Authority (CA for short) model is the better fit for such a goal. In fact, X.509 can be +_so_ seamless to the end-user, that you have probably forgotten that you are already using it right +now! + +HTTPS (SSL/TLS) certificates are likely the most popular form of digital certificate out there, and +they're implemented in a way, where the only time us humans ever have to think about them, is when +our browser tells us that a certificate from a website we're trying to visit, is not valid anymore. + +This popularity is great news for polyproto, because it means that mature tooling for all sorts of +programming languages exists _today_, along with tutorials and documentation, teaching potential implementers how everything works. 
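At its core, the check all of that tooling performs is conceptually small: a certificate is trusted if its issuer's signature verifies against a certificate you already trust. Sketched with stand-in types rather than a real X.509 library:

```rust
// Stand-in types for illustration only; real validation uses an X.509 library
// and checks far more (validity periods, extensions, revocation, ...).
struct Certificate {
    subject: String,
    issuer: String,
    public_key: Vec<u8>,
    /// Signature over this certificate's contents, made by the issuer's key.
    signature: Vec<u8>,
}

/// Stand-in for a real signature check.
fn signed_by(_cert: &Certificate, _issuer_public_key: &[u8]) -> bool {
    true // placeholder
}

/// A leaf certificate is trusted if it was signed by a root we already trust.
/// Since polyproto caps the certification path length at 1, root -> leaf is
/// the only shape a valid chain can take; there are no intermediates.
fn leaf_is_trusted(leaf: &Certificate, trusted_root: &Certificate) -> bool {
    leaf.issuer == trusted_root.subject && signed_by(leaf, &trusted_root.public_key)
}

fn main() {
    let root = Certificate {
        subject: "example.com".into(),
        issuer: "example.com".into(), // self-signed
        public_key: vec![],
        signature: vec![],
    };
    let leaf = Certificate {
        subject: "xenia@example.com".into(),
        issuer: "example.com".into(),
        public_key: vec![],
        signature: vec![],
    };
    println!("leaf trusted: {}", leaf_is_trusted(&leaf, &root));
}
```

This is only the shape of the trust relationship; the following sections cover how polyproto actually applies it.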
## How polyproto uses X.509, briefly In polyproto, home servers act as Certificate Authorities, while each client you connect from has its own end-user Certificate, issued by your home server. With certificates, you can prove your -identity to any person or server at any time. Certificates are also used to verify the integrity -of data sent across the polyproto network. +identity to any person or server at any time. Certificates are also used to verify the integrity of +data sent across the polyproto network. -If servers and clients have well-implemented cryptography, it should be *extremely* unlikely - if -not impossible - for non-quantum-based, non-supercomputer-cluster home servers to alter -the contents of a message before passing them on to the recipient. +If servers and clients have well-implemented cryptography, it should be _extremely_ unlikely - if +not impossible - for non-quantum-based, non-supercomputer-cluster home servers to alter the contents +of a message before passing them on to the recipient. :::quote "Authors note" @@ -82,34 +81,36 @@ This CSR is sent to your home server, which verifies this information and in tur polyproto X.509 Certificate (ID-Cert). Home servers get their root certificate by self-signing a CSR. Unlike actor/client certificates, the -home server root certificate features [X.509 extensions such as the "Basic Constraints" attribute](https://en.wikipedia.org/wiki/X.509#Extensions_informing_a_specific_usage_of_a_certificate), -marking its certificate as a CA certificate, allowing the home server to sign CSRs -using this certificate. +home server root certificate features +[X.509 extensions such as the "Basic Constraints" attribute](https://en.wikipedia.org/wiki/X.509#Extensions_informing_a_specific_usage_of_a_certificate), +marking its certificate as a CA certificate, allowing the home server to sign CSRs using this +certificate. -## But it’s not all perfect. +## But it's not all perfect -Root Certificates in the context of HTTPS and the modern, SSL/TLS protected web are a big source -of centralization. This centralization might be necessary to a degree, but it inevitably means less +Root Certificates in the context of HTTPS and the modern, SSL/TLS protected web are a big source of +centralization. This centralization might be necessary to a degree, but it inevitably means less plurality, and way more hoops to jump through, should you also want to be a CA. To give context for those who might need it, essentially, every certificate for every website out there has to be able to be traced back to one of the root certificates installed on your internet-capable device's operating system or web browser. This creates an incredible amount of centralization, because one Root Certificate Authority is directly responsible for hundreds of -thousands, if not millions of websites. This dependency on a few privileged Root CAs -has been monetized, which is why getting an SSL/TLS certificate for your website -used to cost you money (and depending on who you are, it might still be that way). Nowadays though, +thousands, if not millions of websites. This dependency on a few privileged Root CAs has been +monetized, which is why getting an SSL/TLS certificate for your website used to cost you money (and +depending on who you are, it might still be that way). Nowadays though, [Let's Encrypt](https://letsencrypt.org) exists, offering free SSL/TLS certificates, with the caveat that these certificates are only valid for three months at a time. ## What can we do about this? 
-To try and keep open polyproto networks to *stay* open for everyone, polyproto should make +To try and keep open polyproto networks to _stay_ open for everyone, polyproto should make centralization to the degree of modern-day SSL/TLS at infeasible. An approach we are taking is limiting the length of the certification path. -In X.509, to validate and trust a certificate, you must also trust all the other certificates leading up to the Root Certificate of the Certificate Tree. +In X.509, to validate and trust a certificate, you must also trust all the other certificates +leading up to the Root Certificate of the Certificate Tree. ```mermaid graph LR @@ -132,11 +133,11 @@ certificates, due to the increasing amount of processing power required to handl to verify and sign CSRs. In polyproto, the maximum length of this certification path is 1, meaning a Root Certificate may -only issue leaf certificates. Cutting out middlemen makes it hard to scale to monstrous levels -of centralization, as the control one CA can have over the entire network is limited. +only issue leaf certificates. Cutting out middlemen makes it hard to scale to monstrous levels of +centralization, as the control one CA can have over the entire network is limited. -All of these factors combined should always make developing or hosting your own home server a -viable option. +All of these factors combined should always make developing or hosting your own home server a viable +option. :::quote "Authors note" @@ -152,6 +153,6 @@ viable option. --- -If you have any questions or feedback, feel free to reach out to me via email, where you can -reach me under `flori@polyphony.chat`. OpenPGP is supported, and my public key can be found on -[keys.openpgp.org (click to download pubkey)](https://keys.openpgp.org/vks/v1/by-fingerprint/1AFF5E2D2145C795AB117C2ADCAE4B6877C6FC4E) +If you have any questions or feedback, feel free to reach out to me via email, where you can reach +me under `flori@polyphony.chat`. OpenPGP is supported, and my public key can be found on +[keys.openpgp.org](https://keys.openpgp.org/vks/v1/by-fingerprint/1AFF5E2D2145C795AB117C2ADCAE4B6877C6FC4E) diff --git a/blog/2024-03-06-updates-and-vacation.md b/blog/2024-03-06-updates-and-vacation.md index b440783..f934dd7 100644 --- a/blog/2024-03-06-updates-and-vacation.md +++ b/blog/2024-03-06-updates-and-vacation.md @@ -1,81 +1,92 @@ --- date: 2024-03-06 categories: - - polyproto - - updates + - polyproto + - updates authors: - - bitfl0wer + - bitfl0wer title: Work on polyproto and a "vacation" ⛱️ --- # Work on polyproto and taking a break -In this little update post I write about what I've done in the last couple of weeks alongside talking about taking just a little break -(don't worry, y'all are not getting rid of me!) +In this little update post I write about what I've done in the last couple of weeks alongside +talking about taking just a little break (don't worry, y'all are not getting rid of me!) -{/* truncate */} +{/_ truncate _/} It's been more or less two weeks since the last post - time for the next one! -A good amount of commits have been since the [X.509 in polyproto](https://docs.polyphony.chat/blog/2024/02/19/x509-in-polyproto/) was -published. Let's break them down a little, shall we? +A good amount of commits have been since the +[X.509 in polyproto](https://docs.polyphony.chat/blog/2024/02/19/x509-in-polyproto/) was published. +Let's break them down a little, shall we? 
--- ## Certificate Signing Requests -The polyproto crate can now be used to create very basic - but to the best of my knowledge fully RFC compliant - Certificate Signing -Requests! This is cool, because Certificate Signing Requests are how all Actors (Users) in polyproto will request a Certificate -from their home server. The generated CSRs can be fully verified using the OpenSSL/LibreSSL CLIs, which is very important, as these -two applications are the industry standard when it comes to working with cryptographic standards like X.509. +The polyproto crate can now be used to create very basic - but to the best of my knowledge fully RFC +compliant - Certificate Signing Requests! This is cool, because Certificate Signing Requests are how +all Actors (Users) in polyproto will request a Certificate from their home server. The generated +CSRs can be fully verified using the OpenSSL/LibreSSL CLIs, which is very important, as these two +applications are the industry standard when it comes to working with cryptographic standards like +X.509. -Specifically, polyproto uses the well-defined [PKCS #10 standard](http://www.pkiglobe.org/pkcs10.html) to pack up and transport all -the needed CSR information to your future home server. +Specifically, polyproto uses the well-defined +[PKCS #10 standard](http://www.pkiglobe.org/pkcs10.html) to pack up and transport all the needed CSR +information to your future home server. The next steps here are: - Creating validators for the information supplied in the CSRs - Implementing methods to create an ID-Cert from a CSR -- Write great documentation for what exactly the data inside of the ID-CSR has to look like to be valid +- Write great documentation for what exactly the data inside of the ID-CSR has to look like to be + valid -...and as you might have already guessed, I am already working on all of these things! :) They just take time:tm: +...and as you might have already guessed, I am already working on all of these things! :) They just +take time:tm: --- ## Cleaning up -As fun as designing APIs and software architecture is for me, I don't yet always get all of it right on the first try. This is fine -though, as long as you recognize the mistakes you've made, learn from them and clean the mess you've made. +As fun as designing APIs and software architecture is for me, I don't yet always get all of it right +on the first try. This is fine though, as long as you recognize the mistakes you've made, learn from +them and clean the mess you've made. -I noticed that, as well-meant as some of the traits and trait bounds I've added, they made implementing polyprotos' base types and -traits a lot harder than needed. I've been chipping away at the unnecessary and redundant bits, removing some of these traits entirely. +I noticed that, as well-meant as some of the traits and trait bounds I've added, they made +implementing polyprotos' base types and traits a lot harder than needed. I've been chipping away at +the unnecessary and redundant bits, removing some of these traits entirely. --- ## Updating the specification document -I really wanted to get started on a reference polyproto implementation *before* finishing the specification document. This might seem -a little counter intuitive, but my thought process was, that implementing the crate in code would force me to think about everything -from scratch again, which would make it much easier to spot mistakes I potentially made when writing the specification documentation. 
-These mistakes would primarily be: +I really wanted to get started on a reference polyproto implementation _before_ finishing the +specification document. This might seem a little counter intuitive, but my thought process was, that +implementing the crate in code would force me to think about everything from scratch again, which +would make it much easier to spot mistakes I potentially made when writing the specification +documentation. These mistakes would primarily be: - Information that is there, but unimportant - Information that is important, but not there - Information that is important, there, but wrong -This turned out to be right. I have added a lot of "TODO"s and "FIXME"s into the specification document since started working on the -polyproto crate. All of these TODOs have since been worked on and removed! This doesn't mean that the specification document is now -perfect, but it's already better than before, and it'll only get better as I continue to work on the crate! +This turned out to be right. I have added a lot of "TODO"s and "FIXME"s into the specification +document since started working on the polyproto crate. All of these TODOs have since been worked on +and removed! This doesn't mean that the specification document is now perfect, but it's already +better than before, and it'll only get better as I continue to work on the crate! -Another, notable thing that happened is *removing the auth-part from the core polyproto protocol*! You might be thinking "whaaaat? -does that mean that there will be no authentication in polyproto??" but I can assure you, that that's not what this means. Removing -the authentication endpoints from the core protocol means that polyproto extensions can now choose authentication technologies and -methods for themselves, instead of being forced to implement a bunch of REST-based authentication endpoints they might not even want -or use anyways. +Another, notable thing that happened is _removing the auth-part from the core polyproto protocol_! +You might be thinking "whaaaat? does that mean that there will be no authentication in polyproto??" +but I can assure you, that that's not what this means. Removing the authentication endpoints from +the core protocol means that polyproto extensions can now choose authentication technologies and +methods for themselves, instead of being forced to implement a bunch of REST-based authentication +endpoints they might not even want or use anyways. -I would like to thank `@laxla@tech.lgbt` for this idea! :> Collaboration and feedback are truly great things, and I am happy to have such -a nice group of people on Discord and Matrix who are genuinely interested in the silly thing I/we want to do with Polyphony and -polyproto :) +I would like to thank `@laxla@tech.lgbt` for this idea! :> Collaboration and feedback are truly +great things, and I am happy to have such a nice group of people on Discord and Matrix who are +genuinely interested in the silly thing I/we want to do with Polyphony and polyproto :) --- @@ -85,8 +96,9 @@ Now for the perhaps biggest and probably most important announcement: --- -It just dawned on me that March 8th marks the one year anniversary of Polyphony!! That's genuinely so cool, and means that this is -the project I have worked on the longest for, out of all of my personal projects. +It just dawned on me that March 8th marks the one year anniversary of Polyphony!! 
That's genuinely +so cool, and means that this is the project I have worked on the longest for, out of all of my +personal projects. So yeah - it's been almost a year now! And not a lazy one for me, either. @@ -97,22 +109,28 @@ So yeah - it's been almost a year now! And not a lazy one for me, either. The following paragraph covers the topics of anxiety and depression. If you would not like to read about this, feel free to scroll down until you see a big green box with a check mark. The box indicates that it is safe for you to read again! -Big shocker: I am 👻👻👻👻 depreeeeeeessed 👻👻👻👻👻, and have been for the past... 4-6 years of my life. In that time, I have experienced -the absolute lowest points of my life. Luckily, I have the absolute privilege to have a great therapist who I have been with for 2 years -now, and I am also on medication which already does a good job (most of the time) at taking the edge off the depression. - -As it has been explained to me by my therapist, medication should only be a crutch, though. It should not be the tool you should solely -rely on for the rest of your life to deal with extreme (social) anxiety and depression. Other, non-medication-related options should -be tried, to potentially get you to stop having to take medication to feel non-completely-absolutely-positively-awful every day. - -One of these options is therapy, and, as I've mentioned, I've already been doing that for 2+ years now. It has helped me a great, great -deal already, and I can absolutely encourage anyone reading who is feeling similarly to how I've described and who is in the lucky position -to get (or at least be put on a waiting list for) therapy, to take the first step. It isn't easy; it can actually feel really really scary -at first. But do believe me when I say that a good therapist can absolutely help you to get better. - -But one hour of therapy a week can sadly only do *so* much. This is why I, with the encouragement of my friends, loved ones -(particularly my lovely, lovely girlfriend) and my therapist, have decided to admit myself into a mental health clinic that specializes -in the treatment of depression, anxiety disorders and the like. +Big shocker: I am 👻👻👻👻 depreeeeeeessed 👻👻👻👻👻, and have been for the past... 4-6 years of my +life. In that time, I have experienced the absolute lowest points of my life. Luckily, I have the +absolute privilege to have a great therapist who I have been with for 2 years now, and I am also on +medication which already does a good job (most of the time) at taking the edge off the depression. + +As it has been explained to me by my therapist, medication should only be a crutch, though. It +should not be the tool you should solely rely on for the rest of your life to deal with extreme +(social) anxiety and depression. Other, non-medication-related options should be tried, to +potentially get you to stop having to take medication to feel +non-completely-absolutely-positively-awful every day. + +One of these options is therapy, and, as I've mentioned, I've already been doing that for 2+ years +now. It has helped me a great, great deal already, and I can absolutely encourage anyone reading who +is feeling similarly to how I've described and who is in the lucky position to get (or at least be +put on a waiting list for) therapy, to take the first step. It isn't easy; it can actually feel +really really scary at first. But do believe me when I say that a good therapist can absolutely help +you to get better. 
+ +But one hour of therapy a week can sadly only do _so_ much. This is why I, with the encouragement of +my friends, loved ones (particularly my lovely, lovely girlfriend) and my therapist, have decided to +admit myself into a mental health clinic that specializes in the treatment of depression, anxiety +disorders and the like. :::tip[Safety checkpoint reached!] @@ -120,16 +138,19 @@ in the treatment of depression, anxiety disorders and the like. ::: -Starting on March 20th, I will be leaving my everyday life, my girlfriend, my -friends, laptop, work, personal projects and everything else behind to go there, and hopefully leave a good bad part of me behind when I come back. +Starting on March 20th, I will be leaving my everyday life, my girlfriend, my friends, laptop, work, +personal projects and everything else behind to go there, and hopefully leave a good bad part of me +behind when I come back. -The clinic is far away though, and leaving absolutely everything behind for a month or possibly a little longer is really, really scary -to me. However, I think and hope that the metaphorical plunge into icy water will be worth it for me and my mental health. +The clinic is far away though, and leaving absolutely everything behind for a month or possibly a +little longer is really, really scary to me. However, I think and hope that the metaphorical plunge +into icy water will be worth it for me and my mental health. -When I come back, I'll be better than I was before, which will also mean that I can hopefully be more happy and productive in all aspects of -my life, including Polyphony. +When I come back, I'll be better than I was before, which will also mean that I can hopefully be +more happy and productive in all aspects of my life, including Polyphony. -If you're reading this on or after March 20th, then see you on the other side :) I hope the grass is greener there! +If you're reading this on or after March 20th, then see you on the other side :) I hope the grass is +greener there! :::quote "BEGPOSTING ON MAIN" diff --git a/blog/2024-06-01-extensions.md b/blog/2024-06-01-extensions.md index d6b2921..5c75052 100644 --- a/blog/2024-06-01-extensions.md +++ b/blog/2024-06-01-extensions.md @@ -1,26 +1,26 @@ --- date: 2024-06-01 categories: - - polyproto + - polyproto authors: - - bitfl0wer + - bitfl0wer title: polyproto extensions --- -# polyproto extensions. +# polyproto extensions polyproto is a new federation protocol. Its main focus is enabling seamless participation of one actor on many different servers. The core specification lacks routes for sending any sort of user generated data anywhere, though. What is up with that? -{/* truncate */} +{/_ truncate _/} ## To federate is to be familiar If any application wants to participate in the network of polyproto services, it has to speak the same language as those other services. When wanting to send a message to a server that you are -authenticated on, your client needs to know exactly what that HTTP request has to look like. This -is nothing new. One take on a solution for this problem stems from the people working on the +authenticated on, your client needs to know exactly what that HTTP request has to look like. This is +nothing new. One take on a solution for this problem stems from the people working on the ATProtocol, who created [Lexicon](https://atproto.com/guides/lexicon). From the atproto website: :::quote "Lexicon TL;DR" @@ -35,49 +35,51 @@ polyproto implementation. Lexicon sounds interesting and really versatile! 
However, as mature as the idea itself might be, it is pretty complex and does not yet seem to have good community support in the form of -libraries/crates to aid in working with this new schema system. I also do not want to force polyproto -integrations to use a (potentially very complex) Lexicon parser and dynamic routing system -thingymajig - although having "no rules" means, that if you *want* to build a polyproto service +libraries/crates to aid in working with this new schema system. I also do not want to force +polyproto integrations to use a (potentially very complex) Lexicon parser and dynamic routing system +thingymajig - although having "no rules" means, that if you _want_ to build a polyproto service which uses Lexicon, you absolutely can. ## We need a common foundation I am a big proponent of defining a set of (mutually independent) protocol extensions, which include -additionally needed behavior and concrete HTTP routes for building a specific application. This has the following benefits: +additionally needed behavior and concrete HTTP routes for building a specific application. This has +the following benefits: -- If you'd like to build a polyproto chat client, and there's a polyproto-chat extension, you - simply need to add the additional things required by that extension. No need for complex parsing! Code only what you need and do not care about the rest. +- If you'd like to build a polyproto chat client, and there's a polyproto-chat extension, you simply + need to add the additional things required by that extension. No need for complex parsing! Code + only what you need and do not care about the rest. - Mutual independence means being able to combine extensions however you'd like. You could, for example, create a chat app with integrated microblogging functionality. - Developers are free to come up with whatever they want. How about ActivityPub x polyproto? Since polyproto doesn't define a message format, this is absolutely possible! -- Simplicity! polyproto and its "official" extensions will always just have plain old REST APIs, - for which tooling is readily available. Why bother with something fancy and dynamic, when this - does the trick? +- Simplicity! polyproto and its "official" extensions will always just have plain old REST APIs, for + which tooling is readily available. Why bother with something fancy and dynamic, when this does + the trick? On the other hand, everyone now has to agree on one extension to use for a specific application. You -cannot participate on servers, which have use an extension which is completely different from the one -that your client implements, as an example. +cannot participate on servers, which have use an extension which is completely different from the +one that your client implements, as an example. -## ...the *polyproto* foundation. Get it? *sigh* +## ...the _polyproto_ foundation. Get it? _sigh_ -To develop, provide and maintain polyproto and some major "official" extensions (such as polyproto-chat), -creating a non-profit foundation is likely a good idea for a future where polyproto is actually being -used in the real world. +To develop, provide and maintain polyproto and some major "official" extensions (such as +polyproto-chat), creating a non-profit foundation is likely a good idea for a future where polyproto +is actually being used in the real world. This could sort of be seen like the XMPP Standards Foundation which develops and maintains XMPP -extensions. 
Unlike XMPPs extensions however, official polyproto extensions should always be *major* +extensions. Unlike XMPPs extensions however, official polyproto extensions should always be _major_ additions in functionality. As an example: [XEP-0084](https://xmpp.org/extensions/xep-0084.html) is -the official XMPP extension for User Avatars. An entire 12 point document, which describes one simple -feature! +the official XMPP extension for User Avatars. An entire 12 point document, which describes one +simple feature! polyproto extensions should either always be a major technological addition, which can be taken -advantage of by other extensions (examples for this would be WebSocket Gateways and -Messaging Layer Security), or a document describing a **set** of routes, which define a particular -application use case (A Discord-like, a Reddit-like, a Twitter-like, and so on). Having official -extensions adhere to these rules ensures that polyproto will not become a cluttered mess of -extensions and that it and its extensions are easy to understand and implement, due to less -documentation having to be read and written. +advantage of by other extensions (examples for this would be WebSocket Gateways and Messaging Layer +Security), or a document describing a **set** of routes, which define a particular application use +case (A Discord-like, a Reddit-like, a Twitter-like, and so on). Having official extensions adhere +to these rules ensures that polyproto will not become a cluttered mess of extensions and that it and +its extensions are easy to understand and implement, due to less documentation having to be read and +written. ## Is this a bottleneck for me as a developer @@ -89,12 +91,12 @@ If you are a developer, you might ask yourself: limit what I can do with my application? I have planned for a cool feature X to exist in my chat service, but that doesn't exist in the protocol extension! -Extensions should be a usable minimum of common behavior that all implementations targeting the -same "class" of application must share. Implementations can absolutely offer all the additional -special/unique features they'd like, though. polyproto clients implementing the same extensions -can be treated as clients with a reduced feature set in this case. What is crucial, however, is -that the additional features do not prohibit "reduced feature set clients" from using the behavior -described in the extension, if any sort of federation or interoperability is wanted. +Extensions should be a usable minimum of common behavior that all implementations targeting the same +"class" of application must share. Implementations can absolutely offer all the additional +special/unique features they'd like, though. polyproto clients implementing the same extensions can +be treated as clients with a reduced feature set in this case. What is crucial, however, is that the +additional features do not prohibit "reduced feature set clients" from using the behavior described +in the extension, if any sort of federation or interoperability is wanted. :::tip[Example: What works] @@ -129,7 +131,8 @@ implement. These are my current plans, ideas and thoughts for making a v1 of polyproto extensible. If you have any thoughts on this matter, please do let me know! You can contact me via -[email](mailto:flori@polyphony.chat) or by writing a message on our [Discord](https://discord.com/invite/m3FpcapGDD). +[email](mailto:flori@polyphony.chat) or by writing a message on our +[Discord](https://discord.com/invite/m3FpcapGDD). Thank you for reading! 
:> diff --git a/blog/2024-10-14-nlnet-grant-application.md b/blog/2024-10-14-nlnet-grant-application.md index ee9f658..91f764b 100644 --- a/blog/2024-10-14-nlnet-grant-application.md +++ b/blog/2024-10-14-nlnet-grant-application.md @@ -1,199 +1,237 @@ --- date: 2024-10-14 categories: - - polyproto - - updates + - polyproto + - updates authors: - - bitfl0wer + - bitfl0wer title: NLnet grant application --- # NLnet grant application -The [NLnet foundation](https://nlnet.nl/) is a non-profit organization that supports open-source projects. They have a -grant program that funds projects that align with their goals. On behalf of Polyphony and the -polyproto project, I have submitted an application for a grant of 10,000€ from the NLnet foundation -in their funding round of October 2024. +The [NLnet foundation](https://nlnet.nl/) is a non-profit organization that supports open-source +projects. They have a grant program that funds projects that align with their goals. On behalf of +Polyphony and the polyproto project, I have submitted an application for a grant of 10,000€ from the +NLnet foundation in their funding round of October 2024. -{/* truncate */} +{/_ truncate _/} Should we be successful in our application, the grant will be used to fund the development of the Polyphony and polyproto projects, which would rapidly increase the development velocity of both projects and bring them one big step closer to being ready for a public alpha release. -The application required a bunch of different, interesting questions to be answered. I would like -to share the answers with you, as they give a good overview of what we are working on, what we -are planning to do, and considerations we have made in the past. +The application required a bunch of different, interesting questions to be answered. I would like to +share the answers with you, as they give a good overview of what we are working on, what we are +planning to do, and considerations we have made in the past. ## Can you explain the whole project and its expected outcome(s)? -polyproto is a new federation protocol with the goal of offering account portability and an effortless experience for users and developers alike. -It is part of the Polyphony Project, which aims to be a truly competitive alternative to centralized, proprietary chat services like Discord. -We came up with the idea for polyproto because of a lingering frustration with current federation protocols and their sub-optimal suitability for -such a project. - -polyproto is not limited to an application in a chat service, and that it is not incompatible with federation protocols -such as ActivityPub. It would technically be possible to write a polyproto + ActivityPub server and client to offer new possibilities to the -currently existing Fediverse. We want to empower users, not split userbases further. - -Our goal is to deliver the Polyphony chat service with polyproto at its core, build great SDKs for other developers to work with, and to also directly -work together with other developers to get alternative implementations of polyproto-based chat services for users to choose from. Polyphony -should be the ideal, federated and decentralized Discord replacement; a service, that can be used by teenagers, the elderly and anyone in -between and which ideally does not require any additional technical knowledge or proficiency to use. 
- -Documentation/Protocol specification: -https://docs.polyphony.chat/Protocol%20Specifications/core/ -Simplified overview of the protocol (sadly a little dated, but it can give an overview of the basics nonetheless): -https://docs.polyphony.chat/Overviews/core/ -API Documentation: -https://docs.polyphony.chat/APIs/core/ -"polyproto-rs" Rust crate: -https://github.com/polyphony-chat/polyproto-rs -Polyphony organization overview: -https://github.com/polyphony-chat -"chorus" API Wrapper for Discord, Spacebar-Chat (formerly "Fosscord") and our own server: -https://github.com/polyphony-chat/chorus +polyproto is a new federation protocol with the goal of offering account portability and an +effortless experience for users and developers alike. It is part of the Polyphony Project, which +aims to be a truly competitive alternative to centralized, proprietary chat services like Discord. +We came up with the idea for polyproto because of a lingering frustration with current federation +protocols and their sub-optimal suitability for such a project. + +polyproto is not limited to an application in a chat service, and that it is not incompatible with +federation protocols such as ActivityPub. It would technically be possible to write a polyproto + +ActivityPub server and client to offer new possibilities to the currently existing Fediverse. We +want to empower users, not split userbases further. + +Our goal is to deliver the Polyphony chat service with polyproto at its core, build great SDKs for +other developers to work with, and to also directly work together with other developers to get +alternative implementations of polyproto-based chat services for users to choose from. Polyphony +should be the ideal, federated and decentralized Discord replacement; a service, that can be used by +teenagers, the elderly and anyone in between and which ideally does not require any additional +technical knowledge or proficiency to use. + +- [Documentation/Protocol specification](https://docs.polyphony.chat/Protocol%20Specifications/core/) +- [Simplified overview of the protocol](https://docs.polyphony.chat/Overviews/core/) +- [API Documentation](https://docs.polyphony.chat/APIs/core/) +- ["polyproto-rs" Rust crate](https://github.com/polyphony-chat/polyproto-rs) +- [Polyphony organization overview](https://github.com/polyphony-chat) +- ["chorus" API Wrapper](https://github.com/polyphony-chat/chorus) ## Have you been involved with projects or organisations relevant to this project before? And if so, can you tell us a bit about your contributions? -I (Flori Weber, bitfl0wer) have been following the Spacebar-Chat (formerly "Fosscord") project for some time before deciding to start the -Polyphony-Chat GitHub organization in March 2023. The contributions I have made to the Spacebar project were limited to additions and overhauls -of the projects documentation, because of a lack of TypeScript knowledge, which is the programming language primarily used in the Spacebar -organization. Of course, I have prior experience in software development and software design through my work at Deutsche Telekom MMS GmbH, -but this is my first project with such a topic. I am part of a software development community called "Commune", which is a home for individuals -and groups who also have interest in federated and/or decentralized social media, as well as "reclaiming" the web from the hands of large -corporations. 
There, I have access to like-minded individuals who are also very much interested in polyproto and in seeing polyproto succeed. +I (Flori Weber, bitfl0wer) have been following the Spacebar-Chat (formerly "Fosscord") project for +some time before deciding to start the Polyphony-Chat GitHub organization in March 2023. The +contributions I have made to the Spacebar project were limited to additions and overhauls of the +projects documentation, because of a lack of TypeScript knowledge, which is the programming language +primarily used in the Spacebar organization. Of course, I have prior experience in software +development and software design through my work at Deutsche Telekom MMS GmbH, but this is my first +project with such a topic. I am part of a software development community called "Commune", which is +a home for individuals and groups who also have interest in federated and/or decentralized social +media, as well as "reclaiming" the web from the hands of large corporations. There, I have access to +like-minded individuals who are also very much interested in polyproto and in seeing polyproto +succeed. ## Requested Amount + $10.000 ## Explain what the requested budget will be used for? Does the project have other funding sources, both past and present? -There are three key things that I (Flori Weber, bitfl0wer) have been wanting to tackle for months now, but have never been able to do, because of -a lack of available personal time or expertise. These three things are: +There are three key things that I (Flori Weber, bitfl0wer) have been wanting to tackle for months +now, but have never been able to do, because of a lack of available personal time or expertise. +These three things are: 1. Writing new material and, if necessary, reworking existing material that - - Describes, in a condensed form, what polyproto is about, targeted towards people who do not have [a lot of] existing knowledge in the topics of - federation and decentralized social networking concepts ($250-400) - - Describes, in a condensed form, how polyproto works, targeted towards developers who might be interested in learning more about the inner - workings of polyproto, without needing to read the entire protocol specification document ($250-400) + - Describes, in a condensed form, what polyproto is about, targeted towards people who do not + have [a lot of] existing knowledge in the topics of federation and decentralized social + networking concepts ($250-400) + - Describes, in a condensed form, how polyproto works, targeted towards developers who might be + interested in learning more about the inner workings of polyproto, without needing to read the + entire protocol specification document ($250-400) 2. Starting to build some sort of "brand" by - - Commissioning an artist to create a recognizable logo for polyproto ($200-400) - - Commissioning a frontend developer to build a landing page for our project, since non-developers do seem to prefer information hosted outside - of GitHub and plain looking documentation pages. This landing page would also host the written material mentioned in 1. ($500-1500) -3. 
Paying (freelance) developers to expedite this projects' journey to completion, where there are the following tasks we could use additional brains on: - - Paying developers to start integrating our polyproto crate into our "chorus" client library and "symfonia" server (~$2000) - - Stabilizing and extending the symfonia server to host a first, publicly usable instance (~$1500) - - Getting additional help from UI/UX designers and frontend developers to build a client mockup, then having this mockup translated into - a client prototype which can be hosted alongside the symfonia server (~$2000) - - "Overhaul"/Refactoring tasks which we, as a group of mainly university students working part time jobs in addition, simply did not yet have the - time to get to (~$1000-1500) - -This would total to $7700 or $9700, depending on whether the lower or higher estimate is used. + - Commissioning an artist to create a recognizable logo for polyproto ($200-400) + - Commissioning a frontend developer to build a landing page for our project, since + non-developers do seem to prefer information hosted outside of GitHub and plain looking + documentation pages. This landing page would also host the written material mentioned in 1. + ($500-1500) +3. Paying (freelance) developers to expedite this projects' journey to completion, where there are + the following tasks we could use additional brains on: + - Paying developers to start integrating our polyproto crate into our "chorus" client library + and "symfonia" server (~$2000) + - Stabilizing and extending the symfonia server to host a first, publicly usable instance + (~$1500) + - Getting additional help from UI/UX designers and frontend developers to build a client mockup, + then having this mockup translated into a client prototype which can be hosted alongside the + symfonia server (~$2000) + - "Overhaul"/Refactoring tasks which we, as a group of mainly university students working part + time jobs in addition, simply did not yet have the time to get to (~$1000-1500) + +This would total to +$7700 or $9700, depending on whether the lower or higher estimate is used. Additionally, I would like to extend the domains polyphony.chat and polyproto.org for some years using the funding and top up our prepaid E-Mail -server hosted at https://uberspace.de/en/. The domain and the E-Mail server make up most of our current operating costs, ranging between 7-12$ a month. -This might not sound like a lot in the grand scheme of things, but I am currently paying for these out of my own pocket as an undergraduate with -little income, so being able to potentially reduce monthly expenses is a nice prospect. +server hosted at [Uberspace](https://uberspace.de/en/). The domain and the E-Mail server make up most of our current operating costs, ranging between 7-12$ +a month. This might not sound like a lot in the grand scheme of things, but I am currently paying +for these out of my own pocket as an undergraduate with little income, so being able to potentially +reduce monthly expenses is a nice prospect. -We currently have and have never had additional/other sources of funding. Receiving funding from NLNet would thus be our first source of funding. +We currently have and have never had additional/other sources of funding. Receiving funding from +NLNet would thus be our first source of funding. -## Compare your own project with existing or historical efforts. 
+## Compare your own project with existing or historical efforts ### Spacebar Chat -Already previously mentioned, Spacebar Chat is also working on an open-source Discord replacement in form of offering an API-compatible server. -However, they do not seem interested in making Spacebar Chat a federated chat application with good user experience, as their primary focus -is to reverse-engineer and re-implement the Discord API spec-for-spec. +Already previously mentioned, Spacebar Chat is also working on an open-source Discord replacement in +form of offering an API-compatible server. However, they do not seem interested in making Spacebar +Chat a federated chat application with good user experience, as their primary focus is to +reverse-engineer and re-implement the Discord API spec-for-spec. -From talking to Spacebar Maintainers, their code reportedly seems to have accrued noticeable amounts of technical debt which made it undesirable -for most of the maintainers to continue development on the server. Being friends with some of the now mostly inactive maintainers, I have -considered forking the server repository to have an already mostly working starting ground to work with. However, due to the reports of -technical debt and our organizations' unfamiliarity with JavaScript/TypeScript, we have decided to start from scratch, in Rust. +From talking to Spacebar Maintainers, their code reportedly seems to have accrued noticeable amounts +of technical debt which made it undesirable for most of the maintainers to continue development on +the server. Being friends with some of the now mostly inactive maintainers, I have considered +forking the server repository to have an already mostly working starting ground to work with. +However, due to the reports of technical debt and our organizations' unfamiliarity with +JavaScript/TypeScript, we have decided to start from scratch, in Rust. -The accumulated knowledge that Spacebar contributors and maintainers have collected in form of documentation has already been of great use -for our project, and we are contributing back by updating documentation and creating issues, when we find disparities between the behaviours of -our own server implementation and their server implementation. +The accumulated knowledge that Spacebar contributors and maintainers have collected in form of +documentation has already been of great use for our project, and we are contributing back by +updating documentation and creating issues, when we find disparities between the behaviours of our +own server implementation and their server implementation. ### XMPP Much like XMPP, I have decided to make polyproto an extensible protocol. -I am of the opinion that XMPPs biggest downfall is how many extensions there are, and that a server aiming to be compatible with other -implementations of XMPP-based chat services should aim to implement all of the XEPs to be a viable choice. - -polyproto is actively trying to circumvent this by limiting polyproto extensions (P2 extensions for short) to - -- either be a **set** of APIs and behaviours, defining a generic(!) version of a service. A "service" is, for example, a chat application, - a microblogging application or an image blogging application. Service extensions should be the core functionality that is universally needed - to make an application function. 
In the case of a chat application, that might be: - - - Defining message group size granularity: Direct messages, Group messages, Guild-based messages - - Defining what a room looks like - - Defining the APIs and behaviours required to send and receive messages - - Defining the APIs and behaviours required to perform commonly sought after things, such as reacting to a message with an emoji - - etc. - - The goal is that all different polyproto-based chat applications should then implement this shared behaviour. Of course, developers - may absolutely add their own behaviours and functionality which is perhaps exclusive to their specific implementation. Core - functionality remains commonly defined however, which should make all polyproto-based chat applications interoperable in these - defined, common behaviours. - -- or describe a **major** technological addition, which can be used in the "requires" section of another P2 extension. This "requires" - section can be thought of like the dependency list of a software package. - - Technological additions might be: - - Defining APIs and behaviours needed to implement the MLS (Messaging Layer Security) Protocol - - Defining APIs and behaviours needed to establish and maintain a WebSocket connection, and how to send/receive messages over this - WebSocket connection. - -By using clay-brick-sized building blocks instead of more LEGO-sized building blocks like XMPP does, we hope to mitigate this problem -that we perceive, while still offering an extensible yet well-defined platform to build on. +I am of the opinion that XMPPs biggest downfall is how many extensions there are, and that a server +aiming to be compatible with other implementations of XMPP-based chat services should aim to +implement all of the XEPs to be a viable choice. + +polyproto is actively trying to circumvent this by limiting polyproto extensions (P2 extensions for +short) to + +- either be a **set** of APIs and behaviours, defining a generic(!) version of a service. A + "service" is, for example, a chat application, a microblogging application or an image blogging + application. Service extensions should be the core functionality that is universally needed to + make an application function. In the case of a chat application, that might be: + - Defining message group size granularity: Direct messages, Group messages, Guild-based messages + - Defining what a room looks like + - Defining the APIs and behaviours required to send and receive messages + - Defining the APIs and behaviours required to perform commonly sought after things, such as + reacting to a message with an emoji + - etc. + + The goal is that all different polyproto-based chat applications should then implement this + shared behaviour. Of course, developers may absolutely add their own behaviours and + functionality which is perhaps exclusive to their specific implementation. Core functionality + remains commonly defined however, which should make all polyproto-based chat applications + interoperable in these defined, common behaviours. + +- or describe a **major** technological addition, which can be used in the "requires" section of + another P2 extension. This "requires" section can be thought of like the dependency list of a + software package. 
+ + Technological additions might be: + - Defining APIs and behaviours needed to implement the MLS (Messaging Layer Security) Protocol + - Defining APIs and behaviours needed to establish and maintain a WebSocket connection, and how + to send/receive messages over this WebSocket connection. + +By using clay-brick-sized building blocks instead of more LEGO-sized building blocks like XMPP does, +we hope to mitigate this problem that we perceive, while still offering an extensible yet +well-defined platform to build on. ### Matrix/Element -Matrix is perhaps the closest we have yet gotten to federated chat software aimed towards a general audience. However, as a strong -believer in user experience - especially how first impressions impact new, non-technical users, I believe that Matrix falls flat in this -regard. A lot of peoples first experience with Matrix is the infamous "Could not decrypt: The senders device has not yet sent us the keys -for this message". The protocol and its sub-protocols are vast and complicated and use bespoke cryptography protocol implementations such -as Olm and Megolm, which, in the past, has already been the cause of high-caliber vulnerabilities (see: https://nebuchadnezzar-megolm.github.io/ -and, more recently, https://soatok.blog/2024/08/14/security-issues-in-matrixs-olm-library/#addendum-2024-08-14). - -Matrix is truly impressive from a technical standpoint. Its extremely low centralized architecture fills a niche which especially -people already interested in technology seem to enjoy. However, this invariably results in the fact that user experience has to be -compromised. It is of my opinion that while Matrix is relatively good at what it is doing, it is not a good fit to be a potential -Discord replacement. - -As for a comparison: We are taking a radically different approach to Matrix. Matrix aims for eventually-consistent federation of events -using cryptographically fully verifiable directed acyclic event graphs, where as polyproto, and by extension Polyphony, prioritize -usability above all, intentionally disregarding highly complex or novel data structures in favor of cryptographic verifiability -through digital signatures and simple public key infrastructure. +Matrix is perhaps the closest we have yet gotten to federated chat software aimed towards a general +audience. However, as a strong believer in user experience - especially how first impressions impact +new, non-technical users, I believe that Matrix falls flat in this regard. A lot of peoples first +experience with Matrix is the infamous "Could not decrypt: The senders device has not yet sent us +the keys for this message". The protocol and its sub-protocols are vast and complicated and use +bespoke cryptography protocol implementations such as Olm and Megolm, which, in the past, has +already been the cause of high-caliber vulnerabilities (see: +[Nebuchadnezzar](https://nebuchadnezzar-megolm.github.io/) and, more recently, +[Soatok's blog post](https://soatok.blog/2024/08/14/security-issues-in-matrixs-olm-library/#addendum-2024-08-14)). + +Matrix is truly impressive from a technical standpoint. Its extremely low centralized architecture +fills a niche which especially people already interested in technology seem to enjoy. However, this +invariably results in the fact that user experience has to be compromised. It is of my opinion that +while Matrix is relatively good at what it is doing, it is not a good fit to be a potential Discord +replacement. 
+
+
+As for a comparison: We are taking a radically different approach to Matrix. Matrix aims for
+eventually-consistent federation of events using cryptographically fully verifiable directed acyclic
+event graphs, whereas polyproto, and by extension Polyphony, prioritizes usability above all,
+intentionally disregarding highly complex or novel data structures in favor of cryptographic
+verifiability through digital signatures and simple public key infrastructure.

 ## What are significant technical challenges you expect to solve during the project, if any?

-Currently, our trust model acknowledges, that a users home server is able to create sessions and session tokens on the users' behalf,
-and is thus able to listen in on unencrypted communications, or, in the case of a truly malicious admin, would even be able to send messages
-on behalf of the user. This is not a novel problem, as it also affects all Mastodon ActivityPub servers in existence. Given that this
-potential abuse risk has not been a large issue in the Fediverse, we expect this to also not be a major problem. However, I would like
-to find additional mitigations or even a solution for this problem during further development of polyproto.
-
-Another area that will likely need more work is my current design for how to connect to multiple servers at once: Currently, I expect
-every client to hold a WebSocket connection with each server that they are communicating with, at once. Depending on the amount of traffic,
-this could lead to constantly high resource consumption for clients. If this turns out to be the case, I am sure that we can find plenty
-of software- and protocol-side adjustments and improvements to implement - though it is still a potential technical challenge.
-
-My last major area of concern is how well transmission and de-/serializing of the X.509 based Identity Certificates will work. I am optimistic
-about this however, since the X.500 series of RFCs are extremely well documented and already deeply explored, so that even if challenges
-arise in this area, I am certain that there is enough literature on the exact problem we might be facing, and enough people to ask/talk to.
+Currently, our trust model acknowledges that a user's home server is able to create sessions and
+session tokens on the user's behalf, and is thus able to listen in on unencrypted communications,
+or, in the case of a truly malicious admin, would even be able to send messages on behalf of the
+user. This is not a novel problem, as it also affects all Mastodon ActivityPub servers in existence.
+Given that this potential abuse risk has not been a large issue in the Fediverse, we expect this to
+also not be a major problem. However, I would like to find additional mitigations or even a solution
+for this problem during further development of polyproto.
+
+Another area that will likely need more work is my current design for how to connect to multiple
+servers at once: Currently, I expect every client to hold a WebSocket connection with each server
+that they are communicating with, at once. Depending on the amount of traffic, this could lead to
+constantly high resource consumption for clients. If this turns out to be the case, I am sure that
+we can find plenty of software- and protocol-side adjustments and improvements to implement - though
+it is still a potential technical challenge.
+
+My last major area of concern is how well transmission and de-/serializing of the X.509-based
+Identity Certificates will work. 
I am optimistic about this however, since the X.500 series of RFCs +are extremely well documented and already deeply explored, so that even if challenges arise in this +area, I am certain that there is enough literature on the exact problem we might be facing, and +enough people to ask/talk to. ## Describe the ecosystem of the project, and how you will engage with relevant actors and promote the outcomes? -As the commercialization of Discord.com steadily increases, it is becoming clear that people are looking for a usable alternative. -This is an audience that we are hoping to capture. Our Polyphony Chat service is Discord API compatible, so that actors may use -the Polyphony client to interact with both Discord.com and polyproto-chat-based instances, and that existing bots and automations -could potentially be ported over very easily. This essentially gives people looking for a Discord replacement exactly what they -are looking for, as there should be little to no additional concepts, behaviors or patterns that users have to learn or -re-learn to use our service. - -As previously touched on, we are blessed to already have made a great amount of connections to like-minded developers also working -on similar projects, who are looking optimistically towards polyproto as the tool to use to federate. I also have received explicit -permission from Spacebar Maintainers to promote our projects on their Discord Guild, which currently counts 3600 members. +As the commercialization of Discord.com steadily increases, it is becoming clear that people are +looking for a usable alternative. This is an audience that we are hoping to capture. Our Polyphony +Chat service is Discord API compatible, so that actors may use the Polyphony client to interact with +both Discord.com and polyproto-chat-based instances, and that existing bots and automations could +potentially be ported over very easily. This essentially gives people looking for a Discord +replacement exactly what they are looking for, as there should be little to no additional concepts, +behaviors or patterns that users have to learn or re-learn to use our service. + +As previously touched on, we are blessed to already have made a great amount of connections to +like-minded developers also working on similar projects, who are looking optimistically towards +polyproto as the tool to use to federate. I also have received explicit permission from Spacebar +Maintainers to promote our projects on their Discord Guild, which currently counts 3600 members. diff --git a/blog/2025-02-12-governing-polyproto.md b/blog/2025-02-12-governing-polyproto.md index aa5384d..f5c8c8d 100644 --- a/blog/2025-02-12-governing-polyproto.md +++ b/blog/2025-02-12-governing-polyproto.md @@ -2,34 +2,70 @@ date: 2025-02-12 draft: false categories: - - polyproto + - polyproto authors: - - bitfl0wer + - bitfl0wer title: Governing polyproto --- -There's precedence which suggests that governing federated software as a one-man-band is a bad idea. This begs the question: How would polyproto be governed? +There's precedence which suggests that governing federated software as a one-man-band is a bad idea. +This begs the question: How would polyproto be governed? -{/* truncate */} +{/_ truncate _/} -## I am in a somewhat interesting situation. +## I am in a somewhat interesting situation -Polyphony and polyproto are, at the moment, "just hobbies" for me. However, I *do* still really want this all to succeed long-term. 
These wishes do involve a lot of planning ahead, including thinking about passing on the torch, so that this fire does not die with me. A pretty uncommon thing to say about a hobby, I think. +Polyphony and polyproto are, at the moment, "just hobbies" for me. However, I _do_ still really want +this all to succeed long-term. These wishes do involve a lot of planning ahead, including thinking +about passing on the torch, so that this fire does not die with me. A pretty uncommon thing to say +about a hobby, I think. -It is pretty clear to me that I do not want to be the sole ruler of all things polyproto. I simply don't have the time, energy and skills to manage everything myself, should polyproto really become something that is used by a larger audience. polyproto must be an open standard, and in my opinion, to make a standard truly open is to eventually let go and involve other people in decision making processes. This is what I have been thinking about a lot lately, and I wanted to use this article as a space to share my thoughts and wishes for the future. +It is pretty clear to me that I do not want to be the sole ruler of all things polyproto. I simply +don't have the time, energy and skills to manage everything myself, should polyproto really become +something that is used by a larger audience. polyproto must be an open standard, and in my opinion, +to make a standard truly open is to eventually let go and involve other people in decision making +processes. This is what I have been thinking about a lot lately, and I wanted to use this article as +a space to share my thoughts and wishes for the future. -## Democracy! +## Democracy -To make a long story short, I believe that establishing a non-profit foundation or association that concerns itself with governing and developing polyproto is *the* way to go. The tasks of such an association would be: +To make a long story short, I believe that establishing a non-profit foundation or association that +concerns itself with governing and developing polyproto is _the_ way to go. The tasks of such an +association would be: -- **Coordinating and overseeing work on the protocol:** As the polyproto sees increased usage, changes and additions to the core protocol will most definitely need to be made to align the protocol with real-world needs and wants voiced by users and developers alike. The polyproto association would have the final say over what changes will be included in the next version of polyproto by reviewing, commenting on and accepting/rejecting change proposals submitted by people/groups. -- **Foster a collaborative environment:** I believe it would really *suck* if there were 12 (exaggeration) different p2-extensions defining what a Discord-like chat application should look like. The polyproto association should make efforts to foster, unify and certify "official" extensions and standards that all developers should use. I am not 100% sure about what this would look like; Imagine, someone develops a p2-extension defining what a Google Photos-like should look like. This person now comes to the polyproto association with a wish to get a "seal of approval" via getting their extension certified. The association members notice, that this extension is pretty awesome. What happens now? Should this extension be "absorbed" into the association somehow? 
Should only this exact version of the extension receive certification, requiring the developer to contact the association again if an update to the extension is made, to get the updated version certified as well? And if this is the case, say there happens to be a disagreement between the association and the developer on the contents of the extension update – what then? Does the association eventually "hard fork" the extension? This seems very complex, and I think that cases like these should be considered, given that sadly not every human being gets along with each other. In any case, the polyproto association should make the process of getting your extension proposal reviewed and seen by other people easier, so that extensions are not created in a vacuum, but through *collaboration*. -- **Act as a giant documentation resource:** The association should be a great source of information for interested developers. Documentation and explanations of concepts should be available in various levels of complexity and should always be up-to-date. +- **Coordinating and overseeing work on the protocol:** As the polyproto sees increased usage, + changes and additions to the core protocol will most definitely need to be made to align the + protocol with real-world needs and wants voiced by users and developers alike. The polyproto + association would have the final say over what changes will be included in the next version of + polyproto by reviewing, commenting on and accepting/rejecting change proposals submitted by + people/groups. +- **Foster a collaborative environment:** I believe it would really _suck_ if there were 12 + (exaggeration) different p2-extensions defining what a Discord-like chat application should look + like. The polyproto association should make efforts to foster, unify and certify "official" + extensions and standards that all developers should use. I am not 100% sure about what this would + look like; Imagine, someone develops a p2-extension defining what a Google Photos-like should look + like. This person now comes to the polyproto association with a wish to get a "seal of approval" + via getting their extension certified. The association members notice, that this extension is + pretty awesome. What happens now? Should this extension be "absorbed" into the association + somehow? Should only this exact version of the extension receive certification, requiring the + developer to contact the association again if an update to the extension is made, to get the + updated version certified as well? And if this is the case, say there happens to be a disagreement + between the association and the developer on the contents of the extension update – what then? + Does the association eventually "hard fork" the extension? This seems very complex, and I think + that cases like these should be considered, given that sadly not every human being gets along with + each other. In any case, the polyproto association should make the process of getting your + extension proposal reviewed and seen by other people easier, so that extensions are not created in + a vacuum, but through _collaboration_. +- **Act as a giant documentation resource:** The association should be a great source of information + for interested developers. Documentation and explanations of concepts should be available in + various levels of complexity and should always be up-to-date. ## Closing words -The time for something like this has not yet arrived. 
What this project needs in its infancy is rapid development and the freedom to change a great number of things quickly. +The time for something like this has not yet arrived. What this project needs in its infancy is +rapid development and the freedom to change a great number of things quickly. -What I need *right now* though is to get healthy again. As I am writing this, the flu is still doing a number on me. Bleh. +What I need _right now_ though is to get healthy again. As I am writing this, the flu is still doing +a number on me. Bleh. Stay healthy and stay safe! diff --git a/blog/2025-03-29-polyproto-public-beta.md b/blog/2025-03-29-polyproto-public-beta.md index b6aef65..20a6a9f 100644 --- a/blog/2025-03-29-polyproto-public-beta.md +++ b/blog/2025-03-29-polyproto-public-beta.md @@ -2,33 +2,33 @@ date: 2025-03-30 draft: true categories: - - polyproto + - polyproto authors: - - bitfl0wer + - bitfl0wer title: polyproto v1.0-beta --- -For well over a year, I have been working on polyproto—a new, user-friendly federation protocol. -I'm incredibly happy to finally release this first beta version to the public today. Let's talk about -it in-depth! +For well over a year, I have been working on polyproto—a new, user-friendly federation protocol. I'm +incredibly happy to finally release this first beta version to the public today. Let's talk about it +in-depth! -{/* truncate */} +{/_ truncate _/} ## The "what?"'s and "why?"'s -So, *what is* this "polyproto" thing even? Where did my perceived need for something new come from and -what existing problems does it solve? +So, _what is_ this "polyproto" thing even? Where did my perceived need for something new come from +and what existing problems does it solve? -As you might know, I am working on [Polyphony](https://github.com/polyphony-chat), a FO3S -(Free, Open-Source, *Self-hostable* Software) alternative to Discord, currently still in its infancy -(but growing up by the day!). Federation is an important topic for self-hostable chat software, and after +As you might know, I am working on [Polyphony](https://github.com/polyphony-chat), a FO3S (Free, +Open-Source, _Self-hostable_ Software) alternative to Discord, currently still in its infancy (but +growing up by the day!). Federation is an important topic for self-hostable chat software, and after looking into the topic for a bit, the following became clear: -- People, especially technical users, *want* federation and decentralization. -- People, especially non-technical users never want to have to think about federation, how it - works and what it means. -- People do not want all their stuff to be gone if their home server says "buh-bye" for - any reason. Account portability is a must. +- People, especially technical users, _want_ federation and decentralization. +- People, especially non-technical users never want to have to think about federation, how it works + and what it means. +- People do not want all their stuff to be gone if their home server says "buh-bye" for any reason. + Account portability is a must. :::info "About ATProto" @@ -50,17 +50,16 @@ looking into the topic for a bit, the following became clear: really decentralized. But [that is its own rabbit hole](https://blog.muni.town/atproto-isnt-what-you-think/) entirely. -During my research into existing protocols which fulfill these criteria, the following are the most promising -ones I have encountered. I will also explain why I did not end up choosing them. 
+During my research into existing protocols which fulfill these criteria, the following are the most +promising ones I have encountered. I will also explain why I did not end up choosing them. ### XMPP Much like XMPP, I have decided to make polyproto an extensible protocol. I am of the opinion that XMPPs biggest downfall is how many extensions there are, and that a server -aiming to be compatible with other -implementations of XMPP-based chat services should aim to implement all of the XEPs to be a -viable choice. +aiming to be compatible with other implementations of XMPP-based chat services should aim to +implement all of the XEPs to be a viable choice. polyproto is actively trying to circumvent this by limiting polyproto extensions (P2 extensions for short) to @@ -69,28 +68,31 @@ short) to is, for example, a chat application, a microblogging application or an image blogging application. Service extensions should be the core functionality that is universally needed to make an application function. In the case of a chat application, that might be: - - - Defining message group size granularity: Direct messages, Group messages, Guild-based messages - - Defining what a room looks like - - Defining the APIs and behaviors required to send and receive messages - - Defining the APIs and behaviors required to perform commonly sought after things, such as reacting to a message with an emoji - - etc. - - The goal is that all different polyproto-based chat applications should then implement this shared behavior. Of course, developers - may absolutely add their own behaviors and functionality which is perhaps exclusive to their specific implementation. Core - functionality remains commonly defined however, which should make all polyproto-based chat applications interoperable in these - defined, common behaviors. - -- or describe a **major** technological addition, which can be used in the "requires" section of another P2 extension. This "requires" - section can be thought of like the dependency list of a software package. - - Technological additions might be: - - Defining APIs and behaviors needed to implement the MLS (Messaging Layer Security) Protocol - - Defining APIs and behaviors needed to establish and maintain a WebSocket connection, and how to send/receive messages over this - WebSocket connection. - -By using clay-brick-sized building blocks instead of more LEGO-sized building blocks like XMPP does, we hope to mitigate this problem -that we perceive, while still offering an extensible yet well-defined platform to build on. + - Defining message group size granularity: Direct messages, Group messages, Guild-based messages + - Defining what a room looks like + - Defining the APIs and behaviors required to send and receive messages + - Defining the APIs and behaviors required to perform commonly sought after things, such as + reacting to a message with an emoji + - etc. + + The goal is that all different polyproto-based chat applications should then implement this + shared behavior. Of course, developers may absolutely add their own behaviors and functionality + which is perhaps exclusive to their specific implementation. Core functionality remains commonly + defined however, which should make all polyproto-based chat applications interoperable in these + defined, common behaviors. + +- or describe a **major** technological addition, which can be used in the "requires" section of + another P2 extension. This "requires" section can be thought of like the dependency list of a + software package. 
+ + Technological additions might be: + - Defining APIs and behaviors needed to implement the MLS (Messaging Layer Security) Protocol + - Defining APIs and behaviors needed to establish and maintain a WebSocket connection, and how + to send/receive messages over this WebSocket connection. + +By using clay-brick-sized building blocks instead of more LEGO-sized building blocks like XMPP does, +we hope to mitigate this problem that we perceive, while still offering an extensible yet +well-defined platform to build on. Account portability has been implemented in XMPP with [XEP-0227: Portable Import/Export Format for XMPP-IM Servers](https://xmpp.org/extensions/xep-0227.html). diff --git a/blog/authors.yml b/blog/authors.yml index 6f74c03..0b5e8f6 100644 --- a/blog/authors.yml +++ b/blog/authors.yml @@ -1,4 +1,4 @@ bitfl0wer: - name: bitfl0wer - description: Lead silly individual - avatar: https://avatars.githubusercontent.com/u/39242991?v=4 + name: bitfl0wer + description: Lead silly individual + avatar: https://avatars.githubusercontent.com/u/39242991?v=4 diff --git a/blog/tags.yml b/blog/tags.yml index bfaa778..45f6f11 100644 --- a/blog/tags.yml +++ b/blog/tags.yml @@ -1,19 +1,19 @@ facebook: - label: Facebook - permalink: /facebook - description: Facebook tag description + label: Facebook + permalink: /facebook + description: Facebook tag description hello: - label: Hello - permalink: /hello - description: Hello tag description + label: Hello + permalink: /hello + description: Hello tag description docusaurus: - label: Docusaurus - permalink: /docusaurus - description: Docusaurus tag description + label: Docusaurus + permalink: /docusaurus + description: Docusaurus tag description hola: - label: Hola - permalink: /hola - description: Hola tag description + label: Hola + permalink: /hola + description: Hola tag description diff --git a/docs/CNAME b/docs/CNAME index 3f86173..cdc2171 100644 --- a/docs/CNAME +++ b/docs/CNAME @@ -1 +1 @@ -polyproto.org \ No newline at end of file +polyproto.org diff --git a/docs/contribute.md b/docs/contribute.md index 929780e..3758f8d 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -18,35 +18,37 @@ polyprotos "flagship" library is written in Rust. If you are interested in helpi Rust, then the [polyproto-rs](https://github.com/polyphony-chat/polyproto-rs) crate is an obvious choice for that. Here's a bunch of things that you could always pursue: -- Look at the issues tab! Do you see anything that is interesting to you? If the issue is not already - assigned to someone, feel free to leave a comment requesting more information, or notifying us that - you intend to work on this issue, so that we can avoid duplicate work and support you as best as we - can! +- Look at the issues tab! Do you see anything that is interesting to you? If the issue is not + already assigned to someone, feel free to leave a comment requesting more information, or + notifying us that you intend to work on this issue, so that we can avoid duplicate work and + support you as best as we can! - Check out the `examples` directory! The more, the merrier! Providing examples for other coders is - invaluable work and can give you a deeper understanding of how the library works at its core at the - same time! -- Check out the [coverage report](https://coveralls.io/github/polyphony-chat/polyproto-rs?branch=main) - and write tests for parts of the code which are not yet covered by tests! The tests you write should - make sense, of course. Coverage alone is not a valuable metric. 
Do not hesitate to ask through an + invaluable work and can give you a deeper understanding of how the library works at its core at + the same time! +- Check out the + [coverage report](https://coveralls.io/github/polyphony-chat/polyproto-rs?branch=main) and write + tests for parts of the code which are not yet covered by tests! The tests you write should make + sense, of course. Coverage alone is not a valuable metric. Do not hesitate to ask through an issue, through our Discord or via IRC, if you have any questions whatsoever! All of the above apply to any piece of code that is maintained by the polyphony-chat organization. ### Non-Rust related things -We think that Rust is great, but we also understand that it is not for everyone, and that it has quite -the learning curve. Luckily, all languages are great in their own regard. +We think that Rust is great, but we also understand that it is not for everyone, and that it has +quite the learning curve. Luckily, all languages are great in their own regard. -Having a flagship crate that is written in Rust is great and all, but if you are a Python, TypeScript, -C++, Java, C# or \ developer, you can't really do anything with that Rust crate. +Having a flagship crate that is written in Rust is great and all, but if you are a Python, +TypeScript, C++, Java, C# or \ developer, you can't really do anything with that +Rust crate. -If you are not a Rust developer but *a* developer, you can always start your own polyproto library project -in the language you love most! Again, please feel absolutely free to reach out to us and ask as -many questions as you'd like. We'd love to support you in your effort as much as we can! Making +If you are not a Rust developer but _a_ developer, you can always start your own polyproto library +project in the language you love most! Again, please feel absolutely free to reach out to us and ask +as many questions as you'd like. We'd love to support you in your effort as much as we can! Making polyproto available in multiple languages is an amazing idea and an effort that is always welcomed. ## Writing If you are not a coder, but a human nonetheless (or a robot, or some sort of sentient animal!), you -can always skim the things we wrote (including this very guide) and refine unclear or redundant areas, -or perfect passages that are written less eloquently than you'd like. :heart: +can always skim the things we wrote (including this very guide) and refine unclear or redundant +areas, or perfect passages that are written less eloquently than you'd like. :heart: diff --git a/docs/intro.md b/docs/intro.md index 6a19a9b..2d27e5e 100644 --- a/docs/intro.md +++ b/docs/intro.md @@ -11,15 +11,15 @@ what the polyproto protocol is about. ::: -polyproto is a federated identity and message exchange protocol, which can be used for almost anything. -If you'd like to build an application where federation, user control and data integrity are wanted, -then polyproto is most likely for you. Read this overview to get to know the core concepts and technologies -used in the protocol. +polyproto is a federated identity and message exchange protocol, which can be used for almost +anything. If you'd like to build an application where federation, user control and data integrity +are wanted, then polyproto is most likely for you. Read this overview to get to know the core +concepts and technologies used in the protocol. ## Identity -Your identity is always represented by a Federation ID, FID for short. 
Conceptually, FIDs are nothing new, -and they look like this: +Your identity is always represented by a Federation ID, FID for short. Conceptually, FIDs are +nothing new, and they look like this: `xenia@some.example.com` @@ -28,14 +28,14 @@ Together, this makes for an individual, yet globally unique identifier. ## Certificates and Keys -Identity Certificates - ID-Certs for short - represent your identity when logged in on different sessions. -Each Identity Certificate contains the following information: +Identity Certificates - ID-Certs for short - represent your identity when logged in on different +sessions. Each Identity Certificate contains the following information: - Your federation ID, so that an account can be uniquely identified - A session ID, which is unique for each session and does not change, even if the keys change - An expiry date, after which the certificate becomes invalid -- A signature, generated by your home server, which acts as part of the proof that this certificate was - actually issued by your home server +- A signature, generated by your home server, which acts as part of the proof that this certificate + was actually issued by your home server - Some information from your home server (Home server domain, certificate serial number) - Information about the signature algorithm used @@ -43,15 +43,16 @@ and, last but not least - The public identity key of the client -For the sake of explanation, the most important parts here are the **client public identity key**, your -**federation ID**, the **home servers' domain** and the **home servers' signature for this certificate**. +For the sake of explanation, the most important parts here are the **client public identity key**, +your **federation ID**, the **home servers' domain** and the **home servers' signature for this +certificate**. ## Message signing -When you, for example, chat with someone on a different server, that other server is fully in control -about what data it chooses to present to you. To make sure that this server is always telling you the -truth, and not, for example, manufacturing chat messages or social media posts made by a person, messages -are signed using a clients' public identity key. +When you, for example, chat with someone on a different server, that other server is fully in +control about what data it chooses to present to you. To make sure that this server is always +telling you the truth, and not, for example, manufacturing chat messages or social media posts made +by a person, messages are signed using a clients' public identity key. ```mermaid flowchart LR @@ -65,33 +66,36 @@ flowchart LR This is how it works: -- As touched on previously, every user client has an own identity key pair, comprised of a public and - a private key. The public key is cryptographically linked to the private key, meaning that this public - key can not belong to another private key. Signing data is done using the private key, which ONLY the - client knows. Everyone can then use your public key to prove that this signature was generated by - your client, and that the signature matches the data which was signed. -- Signatures are unique to a piece of data, meaning that two differing pieces of data signed by the same - or different private keys will always[^1] produce different signatures. This is the case, even if the - data only differs minutely (be it by a single space, or a single comma). 
-- Your home server attests to a clients' key pair, by creating a certificate for your public key, which - it signs with its own secret, public/private key pair, and then sends to you. Your *private* key is never - sent anywhere at all, and it does not need to be. - -[^1]: Signature/hash collisions, which although theoretically possible, are extraordinarily infrequent and thus, negligible in practical scenarios. - -Now, your public identity key and your home servers' identity key are 'linked' to each other. This is -represented in the ID-Cert you then receive from your home server. - -- When communicating with another "foreign" server in polyproto, you first send that server your ID-Cert. - The server can then prove the validity of your identity, simply by asking your home server for its public - key and performing a quick signature verification. -- When sending data to the server, such as chat messages, your client computes the signature for that - message using your private key, and attaches this signature to the message you send to other servers. -- Any user, at any point, can now take this signature, your identity certificate and your home servers' - public key and cryptographically verify that it was, in fact, you who sent the message, and that the - message was not tampered with in any way. To distribute the load of ID-Cert requests more evenly, it - is always the duty of the server that the data exchange is happening on, to cache and hand out ID-Certs - of users. +- As touched on previously, every user client has an own identity key pair, comprised of a public + and a private key. The public key is cryptographically linked to the private key, meaning that + this public key can not belong to another private key. Signing data is done using the private key, + which ONLY the client knows. Everyone can then use your public key to prove that this signature + was generated by your client, and that the signature matches the data which was signed. +- Signatures are unique to a piece of data, meaning that two differing pieces of data signed by the + same or different private keys will always[^1] produce different signatures. This is the case, + even if the data only differs minutely (be it by a single space, or a single comma). +- Your home server attests to a clients' key pair, by creating a certificate for your public key, + which it signs with its own secret, public/private key pair, and then sends to you. Your _private_ + key is never sent anywhere at all, and it does not need to be. + +[^1]: + Signature/hash collisions, which although theoretically possible, are extraordinarily infrequent + and thus, negligible in practical scenarios. + +Now, your public identity key and your home servers' identity key are 'linked' to each other. This +is represented in the ID-Cert you then receive from your home server. + +- When communicating with another "foreign" server in polyproto, you first send that server your + ID-Cert. The server can then prove the validity of your identity, simply by asking your home + server for its public key and performing a quick signature verification. +- When sending data to the server, such as chat messages, your client computes the signature for + that message using your private key, and attaches this signature to the message you send to other + servers. 
+- Any user, at any point, can now take this signature, your identity certificate and your home + servers' public key and cryptographically verify that it was, in fact, you who sent the message, + and that the message was not tampered with in any way. To distribute the load of ID-Cert requests + more evenly, it is always the duty of the server that the data exchange is happening on, to cache + and hand out ID-Certs of users. :::info @@ -107,39 +111,41 @@ Trusting the smallest possible amount of entities is great practice when it come polyproto makes sure that almost everyone you do trust is under constant scrutiny, and thus provides measures to verify a data authors' identity, and that the actual data has not been tampered with. -Aside from yourself, the entity with the most trust assigned to it is your home server. Creating your -identity on a specific home server is a pledge from that server and its admins to you, where they promise -not to create sessions on your behalf, or to otherwise perform actions which can be publicly identified -to be carried out by you, without your explicit consent. +Aside from yourself, the entity with the most trust assigned to it is your home server. Creating +your identity on a specific home server is a pledge from that server and its admins to you, where +they promise not to create sessions on your behalf, or to otherwise perform actions which can be +publicly identified to be carried out by you, without your explicit consent. -Should you ever change your mind about your home server's trustworthiness, you can always migrate to another -server while keeping the ownership status of your data on all servers you have sent data to, even if -your home server is offline indefinitely. +Should you ever change your mind about your home server's trustworthiness, you can always migrate to +another server while keeping the ownership status of your data on all servers you have sent data to, +even if your home server is offline indefinitely. ## Multi-use -polyprotos' API definitions and specification document intentionally leave space for implementation-specific -data to be sent, where it makes sense. Nothing about the core protocol makes polyproto inherently unsuitable -for any purpose. +polyprotos' API definitions and specification document intentionally leave space for +implementation-specific data to be sent, where it makes sense. Nothing about the core protocol makes +polyproto inherently unsuitable for any purpose. ## Federation Federation in polyproto means using one identity or client to interact with multiple servers or even -services at once. Implementing federation is straightforward, and entirely seamless to use for end users. +services at once. Implementing federation is straightforward, and entirely seamless to use for end +users. ## Technology -Probably the most refreshing aspect about this new protocol is, that it is really boring. There is really -nothing new about any given atomic aspect of polyproto. Polyproto uses well-known, tried and battle-tested -technologies, such as asymmetric encryption, X.509-based public key infrastructure and -certificates, digital signatures, -JSON over REST and other, already well established technologies such as WebSockets. +Probably the most refreshing aspect about this new protocol is, that it is really boring. There is +really nothing new about any given atomic aspect of polyproto. 
Polyproto uses well-known, tried and +battle-tested technologies, such as asymmetric encryption, X.509-based public key infrastructure and +-certificates, digital signatures, JSON over REST and other, already well established technologies +such as WebSockets. -polyproto should be effortless - both for developers and for end users, who, ideally, should never have to -notice any of the technical stuff going on in the background. +polyproto should be effortless - both for developers and for end users, who, ideally, should never +have to notice any of the technical stuff going on in the background. ## Conclusion -This is just an outline about how polyproto works. The goal with this outline is to inform about the most -relevant parts, while intentionally leaving out some details for the sake of clarity. If you have read -and understood this overview, you should have no - or at least way less - trouble reading the -[full protocol specification](protocols/core/), which covers a lot more details! +This is just an outline about how polyproto works. The goal with this outline is to inform about the +most relevant parts, while intentionally leaving out some details for the sake of clarity. If you +have read and understood this overview, you should have no - or at least way less - trouble reading +the [full protocol specification](protocols/core/), which covers a lot more details! diff --git a/docs/protocols/_category_.json b/docs/protocols/_category_.json index 2c74c49..22f1398 100644 --- a/docs/protocols/_category_.json +++ b/docs/protocols/_category_.json @@ -1,8 +1,8 @@ { - "label": "Protocol Definitions", - "position": 2, - "link": { - "type": "generated-index", - "description": "The core protocol definition and polyproto.org-defined p2 extensions." - } -} \ No newline at end of file + "label": "Protocol Definitions", + "position": 2, + "link": { + "type": "generated-index", + "description": "The core protocol definition and polyproto.org-defined p2 extensions." + } +} diff --git a/docs/protocols/auth.md b/docs/protocols/auth.md index aa80921..60ec8ff 100644 --- a/docs/protocols/auth.md +++ b/docs/protocols/auth.md @@ -7,8 +7,8 @@ **Base Path:** `/.p2/auth/v1/` **v1.0.0-alpha.1** - Treat this as an unfinished draft. -[Semantic versioning v2.0.0](https://semver.org/spec/v2.0.0.html) is used to version this specification. -The version number specified here also applies to the API documentation. +[Semantic versioning v2.0.0](https://semver.org/spec/v2.0.0.html) is used to version this +specification. The version number specified here also applies to the API documentation. The `polyproto-auth` extension is a protocol extension for polyproto that provides a basic authentication mechanism to register new users and authenticate existing users. @@ -16,9 +16,8 @@ authentication mechanism to register new users and authenticate existing users. ## 1. Registration of a new actor Registering a new actor in the context of polyproto is done through an API route defined in the -polyproto-auth -["No registration needed" API](/APIs/Core/Routes%3A No registration needed/#post-create-identity) -documentation. +polyproto-auth ["No registration needed" API](/APIs/Core/Routes%3A No registration +needed/#post-create-identity) documentation. ## 1.1 Registering a new actor on a polyproto home server @@ -90,8 +89,8 @@ ID-Cert's origin, the challenge string's signature, and the ID-Cert's validity. If the verification is successful, the foreign server can issue a session token to the actor. 
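As a rough illustration of this verification step only, the sketch below assumes Ed25519-based
ID-Certs and uses Python's `cryptography` package (v42 or newer); the helper name, its parameters,
and the challenge encoding are illustrative assumptions, not prescribed by this specification.

```python
from datetime import datetime, timezone

from cryptography.exceptions import InvalidSignature
from cryptography.x509 import load_pem_x509_certificate


def verify_challenge_response(
    actor_id_cert_pem: bytes,     # actor's ID-Cert, sent alongside the response
    home_server_cert_pem: bytes,  # home server ID-Cert, fetched or cached by the foreign server
    challenge: bytes,             # the challenge string previously handed out
    signature: bytes,             # the actor's signature over that challenge
) -> bool:
    """Hypothetical helper: checks ID-Cert origin, challenge signature, and validity."""
    actor_cert = load_pem_x509_certificate(actor_id_cert_pem)
    home_cert = load_pem_x509_certificate(home_server_cert_pem)

    # 1. Origin: was the actor's ID-Cert actually issued (signed) by the home server?
    try:
        actor_cert.verify_directly_issued_by(home_cert)
    except (ValueError, TypeError, InvalidSignature):
        return False

    # 2. Challenge signature: does the sender hold the private key matching the ID-Cert?
    try:
        actor_cert.public_key().verify(signature, challenge)  # Ed25519 assumed here
    except InvalidSignature:
        return False

    # 3. Validity: is the ID-Cert currently within its validity period?
    now = datetime.now(timezone.utc)
    return actor_cert.not_valid_before_utc <= now <= actor_cert.not_valid_after_utc
```

If all three checks pass, the server can hand out the session token as described above.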
-**Example:** -Say that Alice is on server A, and wants to authenticate on Server B, using her existing identity. +**Example:** Say that Alice is on server A, and wants to authenticate on Server B, using her +existing identity. Alice's client sends a request to Server B for a challenge string, telling Server B the session ID they are communicating from in the process. Upon receiving a response, Alice signs this challenge @@ -120,9 +119,9 @@ sb->>a: Session token, optional payload Fig. 3: Sequence diagram of a successful identity verification. -In the diagram, Alice's "optional payload" is extra data that might be requested by servers. -This is useful when using a single identity across various polyproto implementations, due to -differing information needs. The payload is signed with the actor's private identity key. +In the diagram, Alice's "optional payload" is extra data that might be requested by servers. This is +useful when using a single identity across various polyproto implementations, due to differing +information needs. The payload is signed with the actor's private identity key. Likewise, the "optional payload" sent by the server in the above diagram can be used by implementations to send additional information to the client. An example might be initial account @@ -140,7 +139,7 @@ information. If Alice's session token expires, they can repeat this process of requesting a challenge string and, together with her ID-Cert, exchange it for a session token. However, if Alice wants to access this third party account from a completely new device, they will have to perform the steps described in -section [1.2](#12-authenticating-a-new-client-on-a-polyproto-home-server) to obtain a valid -ID-Cert for that session. +section [1.2](#12-authenticating-a-new-client-on-a-polyproto-home-server) to obtain a valid ID-Cert +for that session. --- diff --git a/docs/protocols/chat.md b/docs/protocols/chat.md index 7de4355..82daa2e 100644 --- a/docs/protocols/chat.md +++ b/docs/protocols/chat.md @@ -2,10 +2,11 @@ :::danger -This specification document is wildly **incomplete and outdated**! It needs to be (and will be) reworked and does not -represent the current state of polyproto-chat. +This specification document is wildly **incomplete and outdated**! It needs to be (and will be) +reworked and does not represent the current state of polyproto-chat. -Please check out [the OpenAPI specification(s)](https://github.com/polyphony-chat/typespec-openapi/tree/main/build/3.1.0) +Please check out +[the OpenAPI specification(s)](https://github.com/polyphony-chat/typespec-openapi/tree/main/build/3.1.0) of polyproto-chat instead, as they provide a much better overview of currently thought-of APIs and behaviors. @@ -17,21 +18,26 @@ behaviors. **Base Path:** `/.p2/chat/v1/` -polyproto-chat is a federated chat service for communities, built on the polyproto protocol. -This specification defines the HTTP API endpoints for core chat functionality including guilds, -channels, messages, and user management. +[Semantic versioning v2.0.0](https://semver.org/spec/v2.0.0.html) is used to version this +specification. + +polyproto-chat is a federated chat service for communities, built on the polyproto protocol. This +specification defines the HTTP API endpoints for core chat functionality including guilds, channels, +messages, and user management. ## Authentication and Authorization -All endpoints requiring authentication use Bearer token authentication as defined in the polyproto core specification. 
Session tokens are obtained through the core authentication flow and ID-Cert verification process. +All endpoints requiring authentication use Bearer token authentication as defined in the polyproto +core specification. Session tokens are obtained through the core authentication flow and ID-Cert +verification process. ### Authorization Levels -- **Public**: No authentication required -- **Authenticated**: Requires valid session token -- **Guild Member**: Requires guild membership -- **Guild Admin**: Requires administrative permissions in guild -- **Server Admin**: Requires server-wide administrative permissions +- **Public**: No authentication required +- **Authenticated**: Requires valid session token +- **Guild Member**: Requires guild membership +- **Guild Admin**: Requires administrative permissions in guild +- **Server Admin**: Requires server-wide administrative permissions ## Core Entities @@ -94,15 +100,15 @@ Retrieve information about a specific guild. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Public for discoverable guilds, Guild Member for private guilds **Responses:** -- `200`: Guild object -- `403`: Access forbidden -- `404`: Guild not found +- `200`: Guild object +- `403`: Access forbidden +- `404`: Guild not found ### Create Guild @@ -124,9 +130,9 @@ Create a new guild. Requires authentication. **Responses:** -- `201`: Guild created successfully -- `400`: Invalid request body -- `403`: Insufficient permissions +- `201`: Guild created successfully +- `400`: Invalid request body +- `403`: Insufficient permissions ### Update Guild @@ -136,7 +142,7 @@ Update guild settings. Requires guild administrative permissions. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Guild Admin @@ -152,9 +158,9 @@ Update guild settings. Requires guild administrative permissions. **Responses:** -- `200`: Guild updated successfully -- `403`: Insufficient permissions -- `404`: Guild not found +- `200`: Guild updated successfully +- `403`: Insufficient permissions +- `404`: Guild not found ### Delete Guild @@ -164,19 +170,19 @@ Delete a guild. Only the guild owner can perform this action. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Guild Owner **Headers:** -- `X-P2-Sensitive-Solution` (required): Second factor authentication for sensitive action +- `X-P2-Sensitive-Solution` (required): Second factor authentication for sensitive action **Responses:** -- `204`: Guild deleted successfully -- `403`: Insufficient permissions -- `404`: Guild not found +- `204`: Guild deleted successfully +- `403`: Insufficient permissions +- `404`: Guild not found ## Channel Management @@ -188,15 +194,15 @@ Retrieve all channels in a guild. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Guild Member **Responses:** -- `200`: Array of channel objects -- `403`: Access forbidden -- `404`: Guild not found +- `200`: Array of channel objects +- `403`: Access forbidden +- `404`: Guild not found ### Create Channel @@ -206,7 +212,7 @@ Create a new channel in a guild. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Guild Admin @@ -223,9 +229,9 @@ Create a new channel in a guild. 
**Responses:** -- `201`: Channel created successfully -- `400`: Invalid request body -- `403`: Insufficient permissions +- `201`: Channel created successfully +- `400`: Invalid request body +- `403`: Insufficient permissions ### Update Channel @@ -235,7 +241,7 @@ Update channel settings. **Parameters:** -- `channel_id` (path, required): The ID of the channel +- `channel_id` (path, required): The ID of the channel **Authorization:** Guild Admin @@ -251,9 +257,9 @@ Update channel settings. **Responses:** -- `200`: Channel updated successfully -- `403`: Insufficient permissions -- `404`: Channel not found +- `200`: Channel updated successfully +- `403`: Insufficient permissions +- `404`: Channel not found ### Delete Channel @@ -263,15 +269,15 @@ Delete a channel. **Parameters:** -- `channel_id` (path, required): The ID of the channel +- `channel_id` (path, required): The ID of the channel **Authorization:** Guild Admin **Responses:** -- `204`: Channel deleted successfully -- `403`: Insufficient permissions -- `404`: Channel not found +- `204`: Channel deleted successfully +- `403`: Insufficient permissions +- `404`: Channel not found ## Message Management @@ -283,18 +289,18 @@ Retrieve messages from a channel with optional pagination. **Parameters:** -- `channel_id` (path, required): The ID of the channel -- `limit` (query, optional): Maximum number of messages to return (default: 50, max: 100) -- `before` (query, optional): Get messages before this message ID -- `after` (query, optional): Get messages after this message ID +- `channel_id` (path, required): The ID of the channel +- `limit` (query, optional): Maximum number of messages to return (default: 50, max: 100) +- `before` (query, optional): Get messages before this message ID +- `after` (query, optional): Get messages after this message ID **Authorization:** Guild Member **Responses:** -- `200`: Array of message objects -- `403`: Access forbidden -- `404`: Channel not found +- `200`: Array of message objects +- `403`: Access forbidden +- `404`: Channel not found ### Send Message @@ -304,7 +310,7 @@ Send a message to a channel. Messages must be signed with the actor's private ke **Parameters:** -- `channel_id` (path, required): The ID of the channel +- `channel_id` (path, required): The ID of the channel **Authorization:** Guild Member @@ -321,10 +327,10 @@ Send a message to a channel. Messages must be signed with the actor's private ke **Responses:** -- `201`: Message sent successfully -- `400`: Invalid message content or signature -- `403`: Insufficient permissions -- `404`: Channel not found +- `201`: Message sent successfully +- `400`: Invalid message content or signature +- `403`: Insufficient permissions +- `404`: Channel not found ### Edit Message @@ -334,8 +340,8 @@ Edit a previously sent message. Only the message author can edit their messages. **Parameters:** -- `channel_id` (path, required): The ID of the channel -- `message_id` (path, required): The ID of the message +- `channel_id` (path, required): The ID of the channel +- `message_id` (path, required): The ID of the message **Authorization:** Message Author @@ -350,10 +356,10 @@ Edit a previously sent message. Only the message author can edit their messages. 
**Responses:** -- `200`: Message edited successfully -- `400`: Invalid content or signature -- `403`: Insufficient permissions -- `404`: Message not found +- `200`: Message edited successfully +- `400`: Invalid content or signature +- `403`: Insufficient permissions +- `404`: Message not found ### Delete Message @@ -363,16 +369,16 @@ Delete a message. Authors can delete their own messages, administrators can dele **Parameters:** -- `channel_id` (path, required): The ID of the channel -- `message_id` (path, required): The ID of the message +- `channel_id` (path, required): The ID of the channel +- `message_id` (path, required): The ID of the message **Authorization:** Message Author or Guild Admin **Responses:** -- `204`: Message deleted successfully -- `403`: Insufficient permissions -- `404`: Message not found +- `204`: Message deleted successfully +- `403`: Insufficient permissions +- `404`: Message not found ## Guild Membership @@ -384,17 +390,17 @@ Retrieve guild member list with optional pagination. **Parameters:** -- `guild_id` (path, required): The ID of the guild -- `limit` (query, optional): Maximum number of members to return (default: 100) -- `after` (query, optional): Get members after this user ID +- `guild_id` (path, required): The ID of the guild +- `limit` (query, optional): Maximum number of members to return (default: 100) +- `after` (query, optional): Get members after this user ID **Authorization:** Guild Member **Responses:** -- `200`: Array of member objects -- `403`: Access forbidden -- `404`: Guild not found +- `200`: Array of member objects +- `403`: Access forbidden +- `404`: Guild not found ### Join Guild @@ -404,7 +410,7 @@ Join a guild using an invite code or if the guild is public. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Authenticated @@ -418,10 +424,10 @@ Join a guild using an invite code or if the guild is public. **Responses:** -- `201`: Successfully joined guild -- `400`: Invalid invite code -- `403`: Cannot join guild -- `404`: Guild not found +- `201`: Successfully joined guild +- `400`: Invalid invite code +- `403`: Cannot join guild +- `404`: Guild not found ### Leave Guild @@ -431,14 +437,14 @@ Leave a guild. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Guild Member **Responses:** -- `204`: Successfully left guild -- `404`: Guild not found or not a member +- `204`: Successfully left guild +- `404`: Guild not found or not a member ### Kick Member @@ -448,16 +454,16 @@ Remove a member from the guild. **Parameters:** -- `guild_id` (path, required): The ID of the guild -- `user_id` (path, required): The federation ID of the user to kick +- `guild_id` (path, required): The ID of the guild +- `user_id` (path, required): The federation ID of the user to kick **Authorization:** Guild Admin **Responses:** -- `204`: Member kicked successfully -- `403`: Insufficient permissions -- `404`: Guild or member not found +- `204`: Member kicked successfully +- `403`: Insufficient permissions +- `404`: Guild or member not found ## Direct Messages @@ -479,10 +485,10 @@ Create or retrieve a direct message channel with another user. 
**Responses:** -- `200`: Existing DM channel -- `201`: New DM channel created -- `400`: Invalid recipient -- `404`: User not found +- `200`: Existing DM channel +- `201`: New DM channel created +- `400`: Invalid recipient +- `404`: User not found ### Get DM Channels @@ -494,7 +500,7 @@ Retrieve all direct message channels for the authenticated user. **Responses:** -- `200`: Array of DM channel objects +- `200`: Array of DM channel objects ## File Attachments @@ -508,15 +514,15 @@ Upload a file attachment using the RawR resource system. **Headers:** -- `Content-Length` (required): Size of the file in bytes +- `Content-Length` (required): Size of the file in bytes **Request Body:** Binary file data **Responses:** -- `201`: File uploaded successfully with resource ID -- `413`: File too large -- `415`: Unsupported file type +- `201`: File uploaded successfully with resource ID +- `413`: File too large +- `415`: Unsupported file type ### Get Attachment @@ -526,15 +532,15 @@ Retrieve a file attachment by resource ID. **Parameters:** -- `resource_id` (path, required): The resource ID of the attachment +- `resource_id` (path, required): The resource ID of the attachment **Authorization:** Context-dependent based on message permissions **Responses:** -- `200`: File data -- `403`: Access forbidden -- `404`: Attachment not found +- `200`: File data +- `403`: Access forbidden +- `404`: Attachment not found ## Guild Invites @@ -546,7 +552,7 @@ Create an invite for a guild channel. **Parameters:** -- `channel_id` (path, required): The ID of the channel +- `channel_id` (path, required): The ID of the channel **Authorization:** Guild Member with invite permissions @@ -562,9 +568,9 @@ Create an invite for a guild channel. **Responses:** -- `201`: Invite created successfully -- `403`: Insufficient permissions -- `404`: Channel not found +- `201`: Invite created successfully +- `403`: Insufficient permissions +- `404`: Channel not found ### Get Guild Invites @@ -574,15 +580,15 @@ Retrieve all active invites for a guild. **Parameters:** -- `guild_id` (path, required): The ID of the guild +- `guild_id` (path, required): The ID of the guild **Authorization:** Guild Admin **Responses:** -- `200`: Array of invite objects -- `403`: Insufficient permissions -- `404`: Guild not found +- `200`: Array of invite objects +- `403`: Insufficient permissions +- `404`: Guild not found ### Delete Invite @@ -592,19 +598,20 @@ Delete/revoke an invite. **Parameters:** -- `invite_code` (path, required): The invite code to revoke +- `invite_code` (path, required): The invite code to revoke **Authorization:** Guild Admin or Invite Creator **Responses:** -- `204`: Invite deleted successfully -- `403`: Insufficient permissions -- `404`: Invite not found +- `204`: Invite deleted successfully +- `403`: Insufficient permissions +- `404`: Invite not found ## WebSocket Events -polyproto-chat extends the core WebSocket protocol with chat-specific events. Clients must subscribe to the `chat` service channel to receive these events. +polyproto-chat extends the core WebSocket protocol with chat-specific events. Clients must subscribe +to the `chat` service channel to receive these events. 
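The sketch below shows what such a subscription might look like from a client's perspective, using
the core-namespace "Identify" and "Service Channel" opcodes defined in the polyproto core
specification. The gateway URL, token handling, and the omission of heartbeating are illustrative
simplifications; the example uses Python's third-party `websockets` package.

```python
import asyncio
import json

import websockets  # third-party "websockets" package


async def subscribe_to_chat_events(gateway_url: str, token: str) -> None:
    """Illustrative only: identify, subscribe to the `chat` service channel, print chat events."""
    async with websockets.connect(gateway_url) as ws:
        hello = json.loads(await ws.recv())  # core op 1: "Hello", carries the heartbeat interval
        print("heartbeat interval (ms):", hello["d"]["heartbeat_interval"])

        # core op 2: "Identify" using a previously obtained session token
        await ws.send(json.dumps({"n": "core", "op": 2, "d": {"token": token}}))

        # core op 8: "Service Channel" - subscribe to the `chat` service
        await ws.send(json.dumps(
            {"n": "core", "op": 8, "d": {"action": "subscribe", "service": "chat"}}
        ))

        async for raw in ws:  # heartbeating is omitted here for brevity
            event = json.loads(raw)
            if event["n"] == "core" and event["op"] == 9:  # Service Channel ACK
                print("subscription acknowledged:", event["d"].get("success"))
            elif event["n"] == "chat":  # chat-namespace events, see the opcodes below
                print("chat event:", event["op"], event["d"])


# asyncio.run(subscribe_to_chat_events("wss://chat.example.com/gateway", "session-token"))
```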
### Chat Opcodes @@ -637,12 +644,12 @@ All endpoints follow standard HTTP status codes and return error objects in the Rate limits are applied per authenticated user and follow Discord-like patterns: -- Message sending: 5 messages per 5 seconds per channel -- Channel creation: 5 channels per 5 minutes per guild -- Guild operations: 5 operations per minute +- Message sending: 5 messages per 5 seconds per channel +- Channel creation: 5 channels per 5 minutes per guild +- Guild operations: 5 operations per minute Rate limit headers are included in responses: -- `X-RateLimit-Limit`: Request limit for the endpoint -- `X-RateLimit-Remaining`: Remaining requests in current window -- `X-RateLimit-Reset`: Time when rate limit resets +- `X-RateLimit-Limit`: Request limit for the endpoint +- `X-RateLimit-Remaining`: Remaining requests in current window +- `X-RateLimit-Reset`: Time when rate limit resets diff --git a/docs/protocols/core.md b/docs/protocols/core.md index 22c00ea..0181b1f 100644 --- a/docs/protocols/core.md +++ b/docs/protocols/core.md @@ -8,7 +8,7 @@ title: polyproto Core Protocol Specification **Namespace:** `core` -**Version:** `v1.0-beta.1` +**Version:** `v1.0-beta.2` **Base Path:** `/.p2/core/v1/` @@ -22,16 +22,16 @@ inconsistencies, missing or duplicate information and other mistakes at ::: -[Semantic versioning v2.0.0](https://semver.org/spec/v2.0.0.html) is used to version this specification. +[Semantic versioning v2.0.0](https://semver.org/spec/v2.0.0.html) is used to version this +specification. The polyproto protocol is a home-server-based identity federation protocol specification intended -for use in applications where actor identity is needed. polyproto focuses on federated identity -and does not specify any further application-specific features. It can be used standalone, as a -method of authenticating across many applications and services, or as a base for federated protocol -extensions and application implementations. The use of cryptography—namely digital -signatures and X.509 certificates—make polyproto identities verifiable and portable. polyproto -empowers actors, as the home server can be changed at any time, without losing data or connections -to other actors. +for use in applications where actor identity is needed. polyproto focuses on federated identity and +does not specify any further application-specific features. It can be used standalone, as a method +of authenticating across many applications and services, or as a base for federated protocol +extensions and application implementations. The use of cryptography—namely digital signatures and +X.509 certificates—make polyproto identities verifiable and portable. polyproto empowers actors, as +the home server can be changed at any time, without losing data or connections to other actors. This document is intended to be used as a starting point for developers wanting to develop software that can operate with other polyproto implementations. @@ -47,27 +47,26 @@ TODO: glossary is missing polyproto operates under the following trust assumptions: 1. Users entrust their home server and its admins with data security and discretion on actions - appearing as actor-performed, as, with most home server-based systems, it is - possible for a home server to impersonate an actor in unencrypted communications. -2. 
Impersonation *can* be detected by users, as home servers never have access to private keys of + appearing as actor-performed, as, with most home server-based systems, it is possible for a home + server to impersonate an actor in unencrypted communications. +2. Impersonation _can_ be detected by users, as home servers never have access to private keys of actors. To sign messages as an actor, a home server would have to use a different key pair. -3. Users only trust information that can be verified by cryptographic means. This includes - verifying the identity of other actors and verifying the integrity of messages. -4. In a federated context, users trust foreign servers with all unencrypted data they send - to them. +3. Users only trust information that can be verified by cryptographic means. This includes verifying + the identity of other actors and verifying the integrity of messages. +4. In a federated context, users trust foreign servers with all unencrypted data they send to them. 5. Foreign servers cannot impersonate users without immediate detection. Outsiders, meaning foreign servers and other actors, are unable to produce signatures that have a cryptographic connection - to the actors' home server. This is assuming correct implementation of cryptographic - standards, secure home server operation, and non-compromised client devices, all of which are - mostly out of the scope of this specification. + to the actors' home server. This is assuming correct implementation of cryptographic standards, + secure home server operation, and non-compromised client devices, all of which are mostly out of + the scope of this specification. 6. Users rely on their home server for identity key certification, without the home server possessing the identity. ## 3. APIs and underlying communication protocols -The polyproto specification defines a set of [APIs](https://apidocs.polyproto.org). -In addition to these REST APIs, polyproto employs WebSockets for real-time communication between -clients and servers. +The polyproto specification defines a set of [APIs](https://apidocs.polyproto.org). In addition to +these REST APIs, polyproto employs WebSockets for real-time communication between clients and +servers. The APIs are divided into two categories: @@ -78,8 +77,8 @@ The APIs are divided into two categories: All software aiming to federate with other polyproto implementations must implement the APIs defined in the [API specification](https://apidocs.polyproto.org). Implementations can choose to extend the -APIs with additional routes but must not remove or change the behavior of the routes defined in -this specification. +APIs with additional routes but must not remove or change the behavior of the routes defined in this +specification. ### 3.1 `.well-known` @@ -89,41 +88,43 @@ host. :::note Consult the excerpt of this specification explaining what a "domain name" is, to avoid -misunderstandings. You can find this excerpt [here](#def-domain-name). +misunderstandings. You can find this excerpt [in the table of section #5](#def-domain-name). ::: -polyproto servers can be hosted under a domain name different from the domain name -appearing on ID-Certs managed by that server **if all the following conditions are met:** +polyproto servers can be hosted under a domain name different from the domain name appearing on +ID-Certs managed by that server **if all the following conditions are met:** -1. 
Define the "*visible domain name*" as the domain name described by the [polyproto distinguished name](#6111-polyproto-distinguished-name-pdn) - of the "issuer" field on an ID-Cert. -2. Define the "*actual domain name*" as the domain name where the polyproto server is actually hosted - under. -3. The *visible domain name* **must** have a URI `[visible domain name]/.well-known/polyproto-core`, +1. Define the "_visible domain name_" as the domain name described by the + [polyproto distinguished name](#6111-polyproto-distinguished-name-pdn) of the "issuer" field on + an ID-Cert. +2. Define the "_actual domain name_" as the domain name where the polyproto server is actually + hosted under. +3. The _visible domain name_ **must** have a URI `[visible domain name]/.well-known/polyproto-core`, accessible via an HTTP GET request. 4. The resource accessible at this URI must be a JSON object formatted as such: - ```json - { - "api": "[actual domain name]/.p2/core/" - } - ``` + ```json + { + "api": "[actual domain name]/.p2/core/" + } + ``` 5. The ID-Cert received when querying `[actual domain name]/.p2/core/idcert/server` with an HTTP GET - request must have a field "issuer" containing domain components (`dc`) that, when parsed, **equal** - the domain name of the *visible domain name*. If the domain components in this field do not match - the domain components of the *visible domain name*, the server hosted under the *actual domain name* - must not be treated as a polyproto server for the *visible domain name*. + request must have a field "issuer" containing domain components (`dc`) that, when parsed, + **equal** the domain name of the _visible domain name_. If the domain components in this field do + not match the domain components of the _visible domain name_, the server hosted under the _actual + domain name_ must not be treated as a polyproto server for the _visible domain name_. Every polyproto home server must have a `.well-known` URI, accessible via an HTTP GET request. -Should a client not be able to access the polyproto API endpoints located at `[visible domain name]/.p2/core/`, -the client must query `[visible domain name]/.well-known/polyproto-core` with an HTTP GET request and -try to verify the above-mentioned conditions. If all the above-mentioned conditions can be fulfilled, -the client can treat the server located at the *actual domain name* as a polyproto server serving the -*visible domain name*. Clients must not treat the server located at the *actual domain name* as a -polyproto server serving the *actual domain name*. +Should a client not be able to access the polyproto API endpoints located at +`[visible domain name]/.p2/core/`, the client must query +`[visible domain name]/.well-known/polyproto-core` with an HTTP GET request and try to verify the +above-mentioned conditions. If all the above-mentioned conditions can be fulfilled, the client can +treat the server located at the _actual domain name_ as a polyproto server serving the _visible +domain name_. Clients must not treat the server located at the _actual domain name_ as a polyproto +server serving the _actual domain name_. ### 3.2 WebSocket Protocol @@ -170,12 +171,12 @@ end ``` -*Fig. 1: Sequence diagram of a WebSocket connection to a polyproto server.* +_Fig. 1: Sequence diagram of a WebSocket connection to a polyproto server._ #### 3.2.1 Gateway Event Payloads -Gateway event payloads share a general structure, though the content of the `d` field varies depending -on the specific event. 
+Gateway event payloads share a general structure, though the content of the `d` field varies +depending on the specific event. | Field | Type | Description | | ----- | ---------- | ------------------------------------------------------------------- | @@ -189,9 +190,8 @@ on the specific event. ##### 3.2.1.1 Namespaces `n` The `n` field in a gateway event payload indicates the namespace context for the payload. You can -read more about namespaces in [section 8.2](#82-namespaces). -messages -Every namespace may define its own set of opcodes and event names. +read more about namespaces in [section 8.2](#82-namespaces). messages Every namespace may define its +own set of opcodes and event names. The namespace context must be known to the entity receiving the payload, as it is crucial for correctly interpreting the payload. @@ -206,7 +206,7 @@ The following opcodes are defined by the `core` namespace: | `1` | Hello | Actor Receive | Received upon establishing a connection. | | `2` | Identify | Actor Send | Identify to the server. | | `3` | New Session | Actor Receive | Received by all sessions except the new one. | -| `4` | Actor Certificate Invalidation | Actor Send/Receive | An actor certificate has been invalidated. Sent *to* server when an actor invalidates one of their certificates. | +| `4` | Actor Certificate Invalidation | Actor Send/Receive | An actor certificate has been invalidated. Sent _to_ server when an actor invalidates one of their certificates. | | `5` | Resume | Actor Send | Request the replaying events after re-connecting. | | `6` | Server Certificate Change | Actor Receive | Received when the server's certificate changed. | | `7` | Heartbeat ACK | Actor Receive | Acknowledgement of a heartbeat | @@ -217,30 +217,33 @@ The following opcodes are defined by the `core` namespace: ##### 3.2.1.3 Sequence numbers `s` -Sequence numbers are unsigned integers with a 64 bit length. In the rare event that this integer should -overflow, the server must close the connection to the client and prompt the client to initiate a new, -non-resumed gateway connection. +Sequence numbers are unsigned integers with a 64 bit length. In the rare event that this integer +should overflow, the server must close the connection to the client and prompt the client to +initiate a new, non-resumed gateway connection. The sequence number increases by one for every gateway message sent by the server. The client must -keep track of received sequence numbers as part as the [guaranteed delivery mechanism](#326-guaranteed-delivery-of-gateway-messages-through-package-acknowledgement). +keep track of received sequence numbers as part as the +[guaranteed delivery mechanism](#326-guaranteed-delivery-of-gateway-messages-through-package-acknowledgement). Every gateway connection has its own sequence number counter, starting at 0 for the first event sent by the server. #### 3.2.2 Heartbeats -Heartbeats are used to keep the WebSocket connection alive and, combined with [sequence numbers](#3213-sequence-numbers-s), -form an application-layer packet acknowledgement mechanism. The client continuously sends a heartbeat -event to the server with the interval specified in the ["Hello" event payload](#3231-hello-event). -The server must acknowledge the heartbeat event by sending a heartbeat ACK event back to the client. +Heartbeats are used to keep the WebSocket connection alive and, combined with +[sequence numbers](#3213-sequence-numbers-s), form an application-layer packet acknowledgement +mechanism. 
The client continuously sends a heartbeat event to the server with the interval specified +in the ["Hello" event payload](#3231-hello-event). The server must acknowledge the heartbeat event +by sending a heartbeat ACK event back to the client. Servers must account for the time it takes for the client to send the heartbeat event. Before closing a connection due to a missed heartbeat, the server should request a heartbeat event -from the client by sending a heartbeat request event to the client. If the client is not responding within -a reasonable time frame, the server should close the gateway connection with an appropriate +from the client by sending a heartbeat request event to the client. If the client is not responding +within a reasonable time frame, the server should close the gateway connection with an appropriate [close code](#325-closing-a-connection). -The structure of the heartbeat and heartbeat ACK events are described in [section 3.2.3.8](#3238-heartbeat-and-heartbeat-ack-events). +The structure of the heartbeat and heartbeat ACK events are described in +[section 3.2.3.8](#3238-heartbeat-and-heartbeat-ack-events). Recommended values for heartbeat intervals are 30 to 60 seconds. The heartbeat interval is chosen by the server. @@ -249,9 +252,9 @@ the server. ##### 3.2.3.1 "Hello" event -The "Hello" event is sent by the server to the client upon establishing a connection. The `d` payload -for a "Hello" event is an object containing a `heartbeat_interval` field, which specifies the interval -in milliseconds at which the client should send heartbeat events to the server. +The "Hello" event is sent by the server to the client upon establishing a connection. The `d` +payload for a "Hello" event is an object containing a `heartbeat_interval` field, which specifies +the interval in milliseconds at which the client should send heartbeat events to the server. :::tip[Example hello event payload] @@ -260,7 +263,7 @@ in milliseconds at which the client should send heartbeat events to the server. "n": "core", "op": 1, "d": { - "heartbeat_interval": 45000 + "heartbeat_interval": 45000 }, "s": 0 } @@ -284,7 +287,7 @@ client is. "n": "core", "op": 2, "d": { - "token": "a9144379a161e1fcf6b07801b70db6d6c481..." + "token": "a9144379a161e1fcf6b07801b70db6d6c481..." } } ``` @@ -307,8 +310,8 @@ Service channels act like topics in a pub/sub system. They allow clients to subs topic and receive messages sent to that topic. Converting that analogy to polyproto, service channels allow clients to subscribe to gateway events -of additional namespaces. Service channels allow a unified way of giving extensions access to WebSockets -without having to initialize a separate WebSocket connection. +of additional namespaces. Service channels allow a unified way of giving extensions access to +WebSockets without having to initialize a separate WebSocket connection. A service channel event payload has the following structure: @@ -319,8 +322,8 @@ A service channel event payload has the following structure: "n": "core", "op": 8, "d": { - "action": "subscribe", - "service": "service_name" + "action": "subscribe", + "service": "service_name" } } ``` @@ -333,22 +336,22 @@ A service channel event payload has the following structure: | `service` | string | The name of a polyproto service. | The server must respond with a `Service Channel ACK` event payload, indicating whether the action -was successful or not. Clients should expect that the server sends a `Service Channel` payload indicating -the closing of a channel. 
+was successful or not. Clients should expect that the server sends a `Service Channel` payload +indicating the closing of a channel. :::tip[Example service channel ACK event payload - failure] ```json { -"n": "core", -"op": 9, -"d": { -"action": "subscribe", -"service": "service_name", -"success": false, -"error": "Service not found" -}, -"s": 1 + "n": "core", + "op": 9, + "d": { + "action": "subscribe", + "service": "service_name", + "success": false, + "error": "Service not found" + }, + "s": 1 } ``` @@ -361,9 +364,9 @@ the closing of a channel. "n": "core", "op": 9, "d": { - "action": "subscribe", - "service": "service_name", - "success": true, + "action": "subscribe", + "service": "service_name", + "success": true }, "s": 1 } @@ -395,7 +398,7 @@ about the new session mechanism in "n": "core", "op": 3, "d": { - "cert": "-----BEGIN CERTIFICATE-----\nMIIBIjANB..." + "cert": "-----BEGIN CERTIFICATE-----\nMIIBIjANB..." }, "s": 1 } @@ -410,9 +413,9 @@ about the new session mechanism in ##### 3.2.3.5 Actor certificate invalidation event The actor certificate invalidation event is crucial to ensure that the client can detect and respond -to changes in actor certificates. This prevents clients and servers from accepting outdated ID-Certs. -This event is only sent by servers if an [early revocation of an actor ID-Cert](#614-early-revocation-of-id-certs) -occurs. +to changes in actor certificates. This prevents clients and servers from accepting outdated +ID-Certs. This event is only sent by servers if an +[early revocation of an actor ID-Cert](#614-early-revocation-of-id-certs) occurs. :::tip[Example actor certificate invalidation event payload] @@ -421,9 +424,9 @@ occurs. "n": "core", "op": 4, "d": { - "serial": "11704583652649", - "invalidSince": "1737379403", - "signature": "8eacd92192bacc57bb5df3c7922e93bbc8b3f683f5dec9224353b102fa2f2a75" + "serial": "11704583652649", + "invalidSince": "1737379403", + "signature": "8eacd92192bacc57bb5df3c7922e93bbc8b3f683f5dec9224353b102fa2f2a75" }, "s": 1 } @@ -439,8 +442,8 @@ occurs. ##### 3.2.3.6 "Resume" event and "resumed" event -When a client re-connects to a polyproto WebSocket gateway server, the client may send a resume event -to the server instead of identifying. The resumed event sent by the server informs the client +When a client re-connects to a polyproto WebSocket gateway server, the client may send a resume +event to the server instead of identifying. The resumed event sent by the server informs the client about everything the client has missed since their last active connection to the gateway. :::tip[Example resume event structure] @@ -450,8 +453,8 @@ about everything the client has missed since their last active connection to the "n": "core", "op": 5, "d": { - "s": 12, - "token": "aDHsdfghihn2n0c634tnlxibnd2tz09y8m7kbxti7rg" + "s": 12, + "token": "aDHsdfghihn2n0c634tnlxibnd2tz09y8m7kbxti7rg" } } ``` @@ -511,20 +514,19 @@ A set of "relevant events" is a set of events which meet both of the following c :::tip[Example for condition #2] -Assume, that an event "total number of messages sent" exists. The value of this event -payload is a number, representing the total number of messages sent on the entire server. Under -normal circumstances, each client receives this imaginary event every time this state changes. +Assume, that an event "total number of messages sent" exists. The value of this event payload is a +number, representing the total number of messages sent on the entire server. 
Under normal +circumstances, each client receives this imaginary event every time this state changes. For the client to resume, the server should not send each individual update of this value to the -client as part of the "resumed" event. Instead, it would be sufficient to send the most -up-to-date value of this event as part of the "resumed" payload, since how many times this event -has been fired and what previous values of this event were, has no impact -on the validity or state of other events. +client as part of the "resumed" event. Instead, it would be sufficient to send the most up-to-date +value of this event as part of the "resumed" payload, since how many times this event has been fired +and what previous values of this event were, has no impact on the validity or state of other events. -Certificate change events are an example of events, where all intermediary values of the event -are important as well. This is because a client could have sent a message where the signature was -generated using a revoked certificate. In other words, intermediary values of this event type -affect the validity or state of other events. +Certificate change events are an example of events, where all intermediary values of the event are +important as well. This is because a client could have sent a message where the signature was +generated using a revoked certificate. In other words, intermediary values of this event type affect +the validity or state of other events. ::: @@ -533,13 +535,14 @@ Servers may reject a clients' wish to resume, if - The number of events that would need to be replayed is too high for the server to process. - The client is not eligible to resume and must start a new session instead. -In this case, the request to resume is met with an appropriate [close code](#325-closing-a-connection) -(ex.: `4010`) by the server and the connection is terminated. +In this case, the request to resume is met with an appropriate +[close code](#325-closing-a-connection) (ex.: `4010`) by the server and the connection is +terminated. ##### 3.2.3.7 Server certificate change event -The server certificate change event notifies clients about a new server ID-Cert. The `d` payload -of this event contains the ASCII-PEM encoded ID-Cert of the server. +The server certificate change event notifies clients about a new server ID-Cert. The `d` payload of +this event contains the ASCII-PEM encoded ID-Cert of the server. :::tip[Example server certificate change event payload] @@ -548,8 +551,8 @@ of this event contains the ASCII-PEM encoded ID-Cert of the server. "n": "core", "op": 6, "d": { - "cert": "-----BEGIN CERTIFICATE-----\nMIIBIjANB...", - "oldInvalidSince": 1630012713 + "cert": "-----BEGIN CERTIFICATE-----\nMIIBIjANB...", + "oldInvalidSince": 1630012713 }, "s": 1 } @@ -564,10 +567,10 @@ of this event contains the ASCII-PEM encoded ID-Cert of the server. ##### 3.2.3.8 Heartbeat and heartbeat ACK events -The heartbeat event is sent by the client to the server to keep the WebSocket connection alive. -The payload for the heartbeat event is a minified number list. Minified number lists are a JSON -object with the fields `from`, `to`, and `except`. The `from` and `to` fields are strings representing -a range of numbers. The `except` field is an array of strings representing numbers that are not +The heartbeat event is sent by the client to the server to keep the WebSocket connection alive. The +payload for the heartbeat event is a minified number list. 
Minified number lists are a JSON object +with the fields `from`, `to`, and `except`. The `from` and `to` fields are strings representing a +range of numbers. The `except` field is an array of strings representing numbers that are not included in the range. :::info @@ -577,8 +580,8 @@ Numbers are formatted as strings due to JSON conventions. Every number in the `f ::: -The range described by the `from` and `to` fields is a mathematical, closed interval, where -`from` is equal to $a$ and `to` is equal to $b$ : +The range described by the `from` and `to` fields is a mathematical, closed interval, where `from` +is equal to $a$ and `to` is equal to $b$ : $$ [a,b]=\{x\in \mathbb {N} \mid a\leq x\leq b\} @@ -588,9 +591,9 @@ $$ ```json { - from: "1", - to: "20", - except: ["9", "12", "13"] + "from": "1", + "to": "20", + "except": ["9", "12", "13"] } ``` @@ -652,22 +655,22 @@ A heartbeat ACK contains events that the client has re-requested as part of thei ::: -As such, the field `d` in a heartbeat ack may be empty, but never not present. The `d` field contains -an array of other gateway events. Heartbeat ACK payloads must not be present in this array, making recursion -impossible. +As such, the field `d` in a heartbeat ack may be empty, but never not present. The `d` field +contains an array of other gateway events. Heartbeat ACK payloads must not be present in this array, +making recursion impossible. ##### 3.2.3.9 Heartbeat request -The server may manually request a heartbeat from a client at any time. -A heartbeat is usually manually requested, if the server has not received a heartbeat from the client -in due time. Clients should keep their "heartbeat timer" running as is after sending a heartbeat following -a heartbeat request. +The server may manually request a heartbeat from a client at any time. A heartbeat is usually +manually requested, if the server has not received a heartbeat from the client in due time. Clients +should keep their "heartbeat timer" running as is after sending a heartbeat following a heartbeat +request. :::info If the client heartbeat timer states that the next heartbeat in a heartbeat interval of 45 seconds -is due in 8 seconds, the timer should still "read" ~8 seconds after a manual heartbeat request -has been fulfilled. Of course, the client should not send the same heartbeat twice. +is due in 8 seconds, the timer should still "read" ~8 seconds after a manual heartbeat request has +been fulfilled. Of course, the client should not send the same heartbeat twice. ::: @@ -688,7 +691,8 @@ Heartbeat request events do not carry any data in their `d` payload. #### 3.2.4 Establishing a connection -The following diagram illustrates the process of establishing a WebSocket connection, including authentication, error handling with close codes, heartbeat, and session resumption: +The following diagram illustrates the process of establishing a WebSocket connection, including +authentication, error handling with close codes, heartbeat, and session resumption: ```mermaid sequenceDiagram @@ -733,15 +737,17 @@ opt Resume session end ``` -*Fig. X: Sequence diagram of WebSocket connection establishment, authentication, heartbeat, and error handling in polyproto.* +_Fig. X: Sequence diagram of WebSocket connection establishment, authentication, heartbeat, and +error handling in polyproto._ #### 3.2.5 Closing a connection At any time during the connection, the server or client may wish to terminate the session in an -orderly fashion. 
This is being done by sending a [WebSocket close code](https://www.rfc-editor.org/rfc/rfc6455.html#section-7.1.5) -to the recipient. In addition to the pre-defined status codes in [IETF RFC #6455](https://www.rfc-editor.org/rfc/rfc6455.html), -polyproto servers and clients must know of and use the following status codes in their appropriate -situations: +orderly fashion. This is being done by sending a +[WebSocket close code](https://www.rfc-editor.org/rfc/rfc6455.html#section-7.1.5) to the recipient. +In addition to the pre-defined status codes in +[IETF RFC #6455](https://www.rfc-editor.org/rfc/rfc6455.html), polyproto servers and clients must +know of and use the following status codes in their appropriate situations: | Code | Description | Explanation | Eligible for `RESUME`? | Sent by server? | Sent by client? | | ------ | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | --------------- | --------------- | @@ -759,10 +765,10 @@ situations: #### 3.2.6 Guaranteed delivery of gateway messages through package acknowledgement -polyproto implements an application-level guaranteed delivery mechanism. This ensures that all gateway -messages sent from a home server to a client are received by the client in the order they were sent -in – especially when network conditions are suboptimal. This mechanism is based on the use of -[sequence numbers](#3213-sequence-numbers-s) and [heartbeats](#322-heartbeats). +polyproto implements an application-level guaranteed delivery mechanism. This ensures that all +gateway messages sent from a home server to a client are received by the client in the order they +were sent in – especially when network conditions are suboptimal. This mechanism is based on the use +of [sequence numbers](#3213-sequence-numbers-s) and [heartbeats](#322-heartbeats). ??? question "Doesn't TCP already cover this?" @@ -775,11 +781,12 @@ in – especially when network conditions are suboptimal. This mechanism is base retransmitted, preserving the integrity and completeness of communication between the client and server. -The [heartbeat payload](#3238-heartbeat-and-heartbeat-ack-events) defines a payload parameter `except`. +The [heartbeat payload](#3238-heartbeat-and-heartbeat-ack-events) defines a payload parameter +`except`. If `except` was present and contained entries in the heartbeat payload sent by a client, the server -must re-send these events in the `d` part of the heartbeat ACK response. How this `d` payload is to be -formatted is also defined in [section 3.2.3.8](#3238-heartbeat-and-heartbeat-ack-events). +must re-send these events in the `d` part of the heartbeat ACK response. How this `d` payload is to +be formatted is also defined in [section 3.2.3.8](#3238-heartbeat-and-heartbeat-ack-events). The server must prioritize sending these "missed" events over other events. The server should expect that a client requests these events yet another time. @@ -787,28 +794,27 @@ that a client requests these events yet another time. #### 3.3 Events over HTTP For some implementation contexts, a constant WebSocket connection might not be wanted. A client can -instead opt to query an API endpoint to receive events, which would normally be sent through the WebSocket -connection. Concrete polyproto implementations and extensions can decide whether this alternative -behavior is supported. 
+instead opt to query an API endpoint to receive events, which would normally be sent through the +WebSocket connection. Concrete polyproto implementations and extensions can decide whether this +alternative behavior is supported. :::tip[Example] An example of an implementation context where having a constant WebSocket might not be wanted would -be Urban IoT devices, or devices with a limited or only periodically available internet -connection. +be Urban IoT devices, or devices with a limited or only periodically available internet connection. ::: -Querying [this endpoint](/APIs/Core/Routes%3A No registration needed/#get-events) yields a JSON array -containing all events the session has missed since last querying the endpoint or since last being -connected to the WebSocket. +Querying [this endpoint](/APIs/Core/Routes%3A No registration needed/#get-events) yields a JSON +array containing all events the session has missed since last querying the endpoint or since last +being connected to the WebSocket. -Depending on how many events the session has -missed, the earliest events might be excluded from the response to limit the response body's size. This -behavior should be explicitly documented in implementations or extensions of polyproto. +Depending on how many events the session has missed, the earliest events might be excluded from the +response to limit the response body's size. This behavior should be explicitly documented in +implementations or extensions of polyproto. -Due to the intended use cases for retrieving events through REST rather than WebSockets, -this endpoint is not a long-polling endpoint. +Due to the intended use cases for retrieving events through REST rather than WebSockets, this +endpoint is not a long-polling endpoint. There are three intended, main modes for retrieving events in polyproto @@ -817,9 +823,9 @@ There are three intended, main modes for retrieving events in polyproto time. 3. Do not use WebSockets and only query the REST API. -Polling a REST endpoint is inherently inefficient and therefore should only be done with a high interval, -ranging from a few minutes to a few days. If a client requires information more often than that, -then a WebSocket connection should be considered. +Polling a REST endpoint is inherently inefficient and therefore should only be done with a high +interval, ranging from a few minutes to a few days. If a client requires information more often than +that, then a WebSocket connection should be considered. ### 3.4 HTTP @@ -838,10 +844,10 @@ Protocol in polyproto should happen on a best-effort basis. :::tip[Explanation] -We do not mandate that access to a polyproto server must be possible over both IPv4 and IPv6 -as most of the world is not sufficiently IPv6 capable. We do, however, mandate that software -written to support polyproto must be capable of handling traffic over both IPv4 and IPv6, should -both versions of the Internet Protocol be available to the software at runtime. +We do not mandate that access to a polyproto server must be possible over both IPv4 and IPv6 as most +of the world is not sufficiently IPv6 capable. We do, however, mandate that software written to +support polyproto must be capable of handling traffic over both IPv4 and IPv6, should both versions +of the Internet Protocol be available to the software at runtime. ::: @@ -870,8 +876,9 @@ The federation of actor identities allows users to engage with foreign servers a home servers. 
For example, in polyproto-chat, an actor can send direct messages to users from a different server or join the guilds of other servers. -Identity certificates defined in sections [#6. Cryptography and ID-Certs](#6-cryptography-and-id-certs) -and [#6.1 Home server signed certificates for public client identity keys (ID-Cert)](#61-home-server-signed-certificates-for-public-client-identity-keys-id-cert) +Identity certificates defined in sections +[#6. Cryptography and ID-Certs](#6-cryptography-and-id-certs) and +[#6.1 Home server signed certificates for public client identity keys (ID-Cert)](#61-home-server-signed-certificates-for-public-client-identity-keys-id-cert) are employed to sign messages that the actor sends to other servers. :::note[Using one identity for several polyproto implementations] @@ -897,23 +904,21 @@ authenticating on home servers and foreign servers alike. :::warning -Close interoperation is only possible if all involved polyproto implementations have an -overlapping set of supported authentication methods. Therefore, it is highly recommended to implement -and use the polyproto-auth standard, unless your use case requires a different -authentication method. Of course, other authentication methods can be implemented in addition to -polyproto-auth. +Close interoperation is only possible if all involved polyproto implementations have an overlapping +set of supported authentication methods. Therefore, it is highly recommended to implement and use +the polyproto-auth standard, unless your use case requires a different authentication method. Of +course, other authentication methods can be implemented in addition to polyproto-auth. ::: -When successfully authenticated, a client receives a session token, which can then be used to -access authenticated routes on the REST API and to establish a WebSocket connection. Each ID-Cert -can only have one active session token at a time. +When successfully authenticated, a client receives a session token, which can then be used to access +authenticated routes on the REST API and to establish a WebSocket connection. Each ID-Cert can only +have one active session token at a time. :::info[About session tokens] -Session tokens are used to authenticate a user over a longer period of time, instead of for -example, requiring the user to solve a challenge string every time they want to access a -protected route. +Session tokens are used to authenticate a user over a longer period of time, instead of for example, +requiring the user to solve a challenge string every time they want to access a protected route. ::: @@ -976,13 +981,14 @@ TODO: Better describe "Sensitive-Solution". :::warning -Sensitive actions require a second factor of authentication, apart from the actor's -private key. This second factor can be anything from a password to TOTP or hardware keys, depending -on the authentication method or standard used. +Sensitive actions require a second factor of authentication, apart from the actor's private key. +This second factor can be anything from a password to TOTP or hardware keys, depending on the +authentication method or standard used. If this is not done, a malicious user who gained access to an actors' private key can lock that -actor out of their account entirely, as the malicious user could [revoke the actors' other ID-Certs](#714-early-revocation-of-id-certs), -and thus prevent the actor from logging in again. 
+actor out of their account entirely, as the malicious user could +[revoke the actors' other ID-Certs](#614-early-revocation-of-id-certs), and thus prevent the actor +from logging in again. ::: @@ -1000,19 +1006,19 @@ header value represents the second factor of authentication chosen. :::tip[Example] -If the chosen second factor of authentication is TOTP, the value of this header is the current -TOTP verification code. If the chosen second factor of authentication is a password, then the -value of this header is to be that password. +If the chosen second factor of authentication is TOTP, the value of this header is the current TOTP +verification code. If the chosen second factor of authentication is a password, then the value of +this header is to be that password. ::: ### 4.3 Challenge strings -Servers use challenge strings to verify an actor's private identity key -possession without revealing the private key itself. These strings, ranging from 32 to 256 -UTF-8 characters, have a UNIX timestamp lifetime. If the current timestamp surpasses this -lifetime, the challenge fails. The actor signs the string, sending the signature and their -ID-Cert to the server, which then verifies the signature's authenticity. +Servers use challenge strings to verify an actor's private identity key possession without revealing +the private key itself. These strings, ranging from 32 to 256 UTF-8 characters, have a UNIX +timestamp lifetime. If the current timestamp surpasses this lifetime, the challenge fails. The actor +signs the string, sending the signature and their ID-Cert to the server, which then verifies the +signature's authenticity. :::warning @@ -1021,23 +1027,24 @@ Challenge strings provide a different set of security guarantees than ::: -All challenge strings and their responses created must be made -public to ensure that a chain of trust can be maintained. A third party should be able to verify that -the challenge string, which authorized a specific change in data, was signed by the -correct private key. The API routes needed to verify challenges as an outsider are documented in the +All challenge strings and their responses created must be made public to ensure that a chain of +trust can be maintained. A third party should be able to verify that the challenge string, which +authorized a specific change in data, was signed by the correct private key. The API routes needed +to verify challenges as an outsider are documented in the [API documentation](https://apidocs.polyproto.org). :::tip -For public-facing polyproto implementations, it is recommended to use a challenge string length -of at least 64 characters, including at least one character from each of the alphanumeric -character classes (`[a-zA-Z0-9]`). Server implementations should ensure that challenge strings -are unique per actor. If this is not the case, actors could potentially be the target of replay attacks. +For public-facing polyproto implementations, it is recommended to use a challenge string length of +at least 64 characters, including at least one character from each of the alphanumeric character +classes (`[a-zA-Z0-9]`). Server implementations should ensure that challenge strings are unique per +actor. If this is not the case, actors could potentially be the target of replay attacks. ::: -Challenge strings can counteract replay attacks. Their uniqueness ensures that even identical requests -have different signatures, preventing malicious servers from successfully replaying requests. 
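To make the actor's side of completing a challenge more tangible, the following is a minimal,
non-normative sketch assuming an Ed25519 identity key handled via the Python `cryptography`
package. The field names `challenge` and `expires` and the shape of the returned object are
illustrative assumptions; only the signing of the challenge string and the UNIX-timestamp lifetime
check come from this section.

```python
import time

from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey


def complete_challenge(challenge: dict, private_key: Ed25519PrivateKey, id_cert_pem: str) -> dict:
    """Sign a challenge string with the session's private identity key (illustrative only).

    `challenge` is assumed to look like {"challenge": "<32-256 characters>", "expires": 1719000000},
    where "expires" is the UNIX timestamp lifetime of the challenge.
    """
    if time.time() > challenge["expires"]:
        raise ValueError("Challenge lifetime has passed; a new challenge must be requested.")
    signature = private_key.sign(challenge["challenge"].encode("utf-8"))
    # The signature and the actor's ID-Cert are sent to the server, which verifies the signature.
    return {"signature": signature.hex(), "idCert": id_cert_pem}
```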
+Challenge strings can counteract replay attacks. Their uniqueness ensures that even identical +requests have different signatures, preventing malicious servers from successfully replaying +requests. Accessing a challenge string protected route is done as follows: @@ -1076,10 +1083,10 @@ servers from generating federation tokens for users without their consent and kn :::tip[Potential misuse scenario] -A malicious home server can potentially request a federation token on behalf of one of its -users, and use it to generate a session token on the actor's behalf. The malicious server can -then impersonate the actor on another server, as well as read unencrypted data (such as messages, -in the context of a chat application) sent on the other server. +A malicious home server can potentially request a federation token on behalf of one of its users, +and use it to generate a session token on the actor's behalf. The malicious server can then +impersonate the actor on another server, as well as read unencrypted data (such as messages, in the +context of a chat application) sent on the other server. ::: @@ -1095,9 +1102,9 @@ something like this without the actor noticing. Polyproto servers need to inform users of new sessions. This visibility hampers malicious home servers, but does not solve the issue of them being able to create federation tokens for servers the actor does not connect to. This is because, naturally, users cannot receive notifications without a -connection. Clients re-establishing server connections must be updated on any new sessions -generated during their absence. The `NEW_SESSION` gateway event must be dispatched to all sessions, -excluding the new session. The `NEW_SESSION` event's stored data can be accessed in the +connection. Clients re-establishing server connections must be updated on any new sessions generated +during their absence. The `NEW_SESSION` gateway event must be dispatched to all sessions, excluding +the new session. The `NEW_SESSION` event's stored data can be accessed in the [Gateway Events documentation](https://http.cat/404). :::note @@ -1114,10 +1121,11 @@ end-to-end encryption, such as polyproto-mls. ## 5. Federation IDs (FIDs) Every client requires an associated actor identity. Actors are distinguished by a unique federation -ID (FID). FIDs consist of a local name, which is unique per instance, and the instance's root domain. -This combination ensures global uniqueness. +ID (FID). FIDs consist of a local name, which is unique per instance, and the instance's root +domain. This combination ensures global uniqueness. -FIDs used in public contexts are formatted as `actor@optionalsubdomain.domain.tld` and are case-insensitive. +FIDs used in public contexts are formatted as `actor@optionalsubdomain.domain.tld` and are +case-insensitive. FIDs consist of the following parts: @@ -1127,7 +1135,8 @@ FIDs consist of the following parts: | `@` | "Separator" | Separates local name from domain name | | `optionalsubdomain.domain.tld` | "Domain Name" | Includes top-level domain, second-level domain and other subdomains. Address which the actors' home server can be reached at. | -The following regular expression can be used to validate actor IDs: `\b([a-z0-9._%+-]+)@([a-z0-9-]+(\.[a-z0-9-]+)*)$`. +The following regular expression can be used to validate actor IDs: +`\b([a-z0-9._%+-]+)@([a-z0-9-]+(\.[a-z0-9-]+)*)$`. :::info @@ -1144,16 +1153,18 @@ indicates that the federation ID is formatted correctly. 
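As a small, non-normative illustration of such a check (the helper name is an assumption; the
regular expression is the one given above, and matching it only shows that an FID is well-formed,
not that the identity claim behind it is valid):

```python
import re

# Regular expression for validating federation IDs, taken verbatim from section 5.
FID_REGEX = re.compile(r"\b([a-z0-9._%+-]+)@([a-z0-9-]+(\.[a-z0-9-]+)*)$")


def is_well_formed_fid(fid: str) -> bool:
    # FIDs are case-insensitive, so normalize before matching.
    return FID_REGEX.match(fid.lower()) is not None


assert is_well_formed_fid("xenia@example.com")
assert not is_well_formed_fid("not a federation id")
```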
::: For all intents and purposes, a federation ID is a display of identity. However, verifying identity -claims is crucial. See [Section #6.1](#61-home-server-signed-certificates-for-public-client-identity-keys-id-cert) -and [Section #6.2.2](#621-message-verification) for more information. +claims is crucial. See +[Section #6.1](#61-home-server-signed-certificates-for-public-client-identity-keys-id-cert) and +[Section #6.2.2](#621-message-verification) for more information. ## 6. Cryptography and ID-Certs ### 6.1 Home server signed certificates for public client identity keys (ID-Cert) The ID-Cert, an [X.509](https://en.wikipedia.org/wiki/X.509) certificate, validates a public actor -identity key. It is an actor-generated CSR ([Certificate Signing Request](https://en.wikipedia.org/wiki/Certificate_signing_request)), -signed by a home server, encompassing actor identity information and the client's public identity key. +identity key. It is an actor-generated CSR +([Certificate Signing Request](https://en.wikipedia.org/wiki/Certificate_signing_request)), signed +by a home server, encompassing actor identity information and the client's public identity key. Clients can get an ID-Cert in return for a valid and well-formed CSR. Generating a new ID-Cert is considered a [sensitive action](#42-sensitive-actions) and therefore should require a second factor of authentication. @@ -1161,36 +1172,40 @@ of authentication. A CSR in the context of polyproto will be referred to as an ID-CSR. ID-CSRs are DER- or PEM-encoded [PKCS #10](https://datatracker.ietf.org/doc/html/rfc2986) CSRs, with a few additional requirements. -All ID-Certs are valid X.509 v3 certificates. However, not all X.509 v3 certificates are valid ID-Certs. +All ID-Certs are valid X.509 v3 certificates. However, not all X.509 v3 certificates are valid +ID-Certs. -ID-Certs form the basis of message signing and verification in polyproto. -They are used to verify the identity of a client and to verify the integrity of messages sent by a -client. +ID-Certs form the basis of message signing and verification in polyproto. They are used to verify +the identity of a client and to verify the integrity of messages sent by a client. An ID-CSR includes the following information, according to the X.509 standard: - The public identity key of the client. -- A polyproto Distinguished Name (`pDN`) "subject", describing the actor the certificate is - issued to. The `pDN` must be formatted according to [Section 6.1.1.1](#6111-polyproto-distinguished-name-pdn). +- A polyproto Distinguished Name (`pDN`) "subject", describing the actor the certificate is issued + to. The `pDN` must be formatted according to + [Section 6.1.1.1](#6111-polyproto-distinguished-name-pdn). - The signature algorithm used to sign the certificate. - The signature of the certificate, generated by using the entities' private identity key. -- A version identifier, specifying the version of X.509 certificate used. See [chapter 6.1.1](#611-structure-of-an-id-cert) - for a specification of what the version field must look like. -- A list of X.509 capabilities which the actor requests for their certificate. See [chapter 6.1.1.2](#6112-extensions-and-constraints) - for a specification of allowed, required and forbidden capabilities. +- A version identifier, specifying the version of X.509 certificate used. See + [chapter 6.1.1](#611-structure-of-an-id-cert) for a specification of what the version field must + look like. 
+- A list of X.509 capabilities which the actor requests for their certificate. See + [chapter 6.1.1.2](#6112-extensions-and-constraints) for a specification of allowed, required and + forbidden capabilities. -When signing an ID-CSR, the home server must verify the correctness of all claims presented in the CSR. +When signing an ID-CSR, the home server must verify the correctness of all claims presented in the +CSR. :::warning "Important" -All entities receiving an ID-Cert MUST inspect the certificate for correctness and validity. -This includes checking whether the signature matches the certificates' contents and checking the +All entities receiving an ID-Cert MUST inspect the certificate for correctness and validity. This +includes checking whether the signature matches the certificates' contents and checking the certificate's validity period. ::: -Actors must use a separate ID-Cert for each client or session they use. Separating ID-Certs -limits the potential damage a compromised ID-Cert can cause. +Actors must use a separate ID-Cert for each client or session they use. Separating ID-Certs limits +the potential damage a compromised ID-Cert can cause. For two implementations of polyproto to be interoperable, they must support an overlapping set of digital signature algorithms. See [Section 6.5](#65-cryptographic-specifications) for more @@ -1199,8 +1214,8 @@ information on cryptographic specifications. #### 6.1.1 Structure of an ID-Cert The ID-Cert is a valid X.509 certificate, and as such, it has a specific structure. The structure of -an X.509 certificate is defined in [RFC5280](https://tools.ietf.org/html/rfc5280). -ID-Certs encompass a subset of the structure of an X.509 certificate. +an X.509 certificate is defined in [RFC5280](https://tools.ietf.org/html/rfc5280). ID-Certs +encompass a subset of the structure of an X.509 certificate. ID-Certs have the following structure: @@ -1219,35 +1234,37 @@ ID-Certs have the following structure: | The session ID of the client. | No two valid certificates for one session ID can exist. Session IDs have to be unique per actor. | Subject Unique Identifier | | Extensions | [Extensions and Constraints](#6112-extensions-and-constraints) | Extensions | -The domain components (`dc`) in the "issuer" and "subject" fields must be equal and in the same order. -A certificate may not be treated as valid otherwise. X.509 semantics describing the correct ordering -of domain components apply. +The domain components (`dc`) in the "issuer" and "subject" fields must be equal and in the same +order. A certificate may not be treated as valid otherwise. X.509 semantics describing the correct +ordering of domain components apply. ##### 6.1.1.1 polyproto Distinguished Name (`pDN`) -polyproto Distinguished Names (`pDNs`) are a subset of an X.509 certificate's [distinguished -Names (`DNs`)](https://ldap.com/ldap-dns-and-rdns/), defined by the LDAP Data Interchange Format (LDIF). -The `DN` is a sequence of [relative distinguished names (`RDNs`)](https://ldap.com/ldap-dns-and-rdns/). +polyproto Distinguished Names (`pDNs`) are a subset of an X.509 certificate's +[distinguished Names (`DNs`)](https://ldap.com/ldap-dns-and-rdns/), defined by the LDAP Data +Interchange Format (LDIF). The `DN` is a sequence of +[relative distinguished names (`RDNs`)](https://ldap.com/ldap-dns-and-rdns/). A `pDN` must meet all the following requirements: -- If the `pDN` describes an actor, it must have a "common name" attribute. 
The - common name must be the [local name](#5-federation-ids-fids) of the actor. In the case of an actor - with an FID of `xenia@example.com`, the local name would be `xenia`. If the `pDN` describes a - home server, the "common name" attribute must not be present. +- If the `pDN` describes an actor, it must have a "common name" attribute. The common name must be + the [local name](#5-federation-ids-fids) of the actor. In the case of an actor with an FID of + `xenia@example.com`, the local name would be `xenia`. If the `pDN` describes a home server, the + "common name" attribute must not be present. - Must have at least one domain component `dc`, specifying the domain name under which the home server can be reached. This includes the home server's top- and second-level domains, as well as all other subdomains, if present. If the home server does not have a sub- or top-level domain, the `dc` fields for these components should be omitted. - If the `pDN` describes an actor, the `pDN` must include the `UID` ([OID](https://en.wikipedia.org/wiki/Object_identifier) 0.9.2342.19200300.100.1.1) **and** - `uniqueIdentifier` ([OID](https://en.wikipedia.org/wiki/Object_identifier) 0.9.2342.19200300.100.1.44) - fields. - - `UID` field must be equal to the federation ID of the actor, e.g., `actor@domainname-of-home server.example.com`. + `uniqueIdentifier` ([OID](https://en.wikipedia.org/wiki/Object_identifier) + 0.9.2342.19200300.100.1.44) fields. + - `UID` field must be equal to the federation ID of the actor, e.g., + `actor@domainname-of-home server.example.com`. - `uniqueIdentifier` field must be a [Session ID](#6113-session-ids). - Can have other attributes if the additional attributes do not conflict with the above - requirements. Additional attributes might be ignored by other home servers and other clients unless - specified otherwise in a polyproto extension. Additional attributes not part of a polyproto + requirements. Additional attributes might be ignored by other home servers and other clients + unless specified otherwise in a polyproto extension. Additional attributes not part of a polyproto extension must be non-critical X.509 extensions. ##### 6.1.1.2 Extensions and constraints @@ -1265,18 +1282,19 @@ The following constraints must be met by ID-Certs: [Key Usage Flags](https://cryptography.io/en/latest/x509/reference/#cryptography.x509.KeyUsage) and [Basic Constraints](https://cryptography.io/en/latest/x509/reference/#cryptography.x509.BasicConstraints) -are critical extensions. Therefore, if any of these X.509 extensions are present, they must be marked -as "critical." ID-Certs not adhering to this standard must be treated as malformed. +are critical extensions. Therefore, if any of these X.509 extensions are present, they must be +marked as "critical." ID-Certs not adhering to this standard must be treated as malformed. ##### 6.1.1.3 Session IDs -The session ID is an [`ASN.1`](https://en.wikipedia.org/wiki/ASN.1) [`Ia5String`](https://en.wikipedia.org/wiki/IA5STRING) -chosen by the actor requesting the ID-Cert. It is used to uniquely identify a session. The session -ID must be unique for each certificate issued to that actor. A session ID can be reused if the -session belonging to that session ID has become invalid. Session ID reuse in this case also applies -when a different ID-Cert wants to use the same session ID, provided that the session ID is not currently -in use. 
If the session ID is currently in use, the actor requesting the ID-Cert must select a different
-session ID, as session IDs must not be overridden silently.
+The session ID is an [`ASN.1`](https://en.wikipedia.org/wiki/ASN.1)
+[`Ia5String`](https://en.wikipedia.org/wiki/IA5STRING) chosen by the actor requesting the ID-Cert.
+It is used to uniquely identify a session. The session ID must be unique for each certificate issued
+to that actor. A session ID can be reused if the session belonging to that session ID has become
+invalid. Session ID reuse in this case also applies when a different ID-Cert wants to use the same
+session ID, provided that the session ID is not currently in use. If the session ID is currently in
+use, the actor requesting the ID-Cert must select a different session ID, as session IDs must not be
+overridden silently.

Session IDs are 1-32 characters long. They can contain any character permitted by the `ASN.1`
`IA5String` type.

@@ -1292,12 +1310,12 @@ server public identity key caching but no home server-issued identity key certif

:::tip[Potential misuse scenario]

-A malicious foreign server B can fake a message from Alice.
-(Home server: Server A) to Bob (Home Server: Server B), by generating a new identity key pair
-and using it to sign the malicious message. The foreign server then sends that message to Bob,
-who will then request Alice's public identity key from Server B, who will then send Bob the
-malicious public identity key. Bob will succeed in verifying the signature of the message, and
-not notice that the message has been crafted by a malicious server.
+A malicious foreign server B can fake a message from Alice (Home server: Server A) to Bob (Home
+Server: Server B) by generating a new identity key pair and using it to sign the malicious message.
+The foreign server then sends that message to Bob, who will then request Alice's public identity key
+from Server B, who will then send Bob the malicious public identity key. Bob will succeed in
+verifying the signature of the message, and not notice that the message has been crafted by a
+malicious server.

:::

@@ -1309,33 +1327,33 @@ malicious server cannot generate an identity key pair for Alice, which is signed

A session can choose to regenerate their ID-Cert at any time. This is done by taking an identity key
pair, using the private key to generate a new CSR, and sending the new Certificate Signing Request
to the home server. The home server will then generate the new ID-Cert, given that the CSR is valid.
Actors can only regenerate ID-Certs for their current session, identified by their session ID and
session token. Other sessions can only be invalidated by
[revoking them](#614-early-revocation-of-id-certs). Re-generating an ID-Cert is a
[sensitive action](#42-sensitive-actions), performed by using the appropriate API route.

-Home servers must keep track of the ID-Certs of all users (and their clients) registered on them
-and must offer a clients' ID-Cert for a given timestamp on request. This is to ensure messages
-sent by users, even ones sent a long time ago, can be verified by other servers and their users.
-This is because the public key of an actor likely changes over time, and users must sign all messages
-they send to servers.
+Home servers must keep track of the ID-Certs of all users (and their clients) registered on them and
+must offer a client's ID-Cert for a given timestamp on request.
This is to ensure messages sent by +users, even ones sent a long time ago, can be verified by other servers and their users. This is +because the public key of an actor likely changes over time, and users must sign all messages they +send to servers. Users must hold on to all of their past key pairs, as they might need them to [migrate their account in the future](#7-migrations). How this is done is specified in [section 6.3: Private key loss prevention and private key recovery](#63-private-key-loss-prevention-and-private-key-recovery). -The lifetime of an actor ID-Cert should be limited to a maximum of 60 days. This is to ensure that even -in a worst-case scenario, a compromised ID-Cert can only be used for a limited amount of time. "Renewing" -an ID-Cert consists of: +The lifetime of an actor ID-Cert should be limited to a maximum of 60 days. This is to ensure that +even in a worst-case scenario, a compromised ID-Cert can only be used for a limited amount of time. +"Renewing" an ID-Cert consists of: 1. Revoking the old ID-Cert 2. Requesting a new ID-Cert with the same [session ID](#6113-session-ids) as the old ID-Cert. -A client that has this second factor of authentication stored -should renew the ID-Cert of the authenticated actor without further interaction. +A client that has this second factor of authentication stored should renew the ID-Cert of the +authenticated actor without further interaction. -Server ID-Certs should be rotated way less often (every 1-3 years). Only rotate a server ID-Cert -if it is suspected to be compromised, is lost, or has expired. +Server ID-Certs should be rotated way less often (every 1-3 years). Only rotate a server ID-Cert if +it is suspected to be compromised, is lost, or has expired. ```mermaid sequenceDiagram @@ -1353,15 +1371,14 @@ alt verify success end ``` -*Fig. 2: Sequence diagram depicting the process of a client that uses a CSR to request a new ID-Cert -from their home server.* +_Fig. 2: Sequence diagram depicting the process of a client that uses a CSR to request a new ID-Cert +from their home server._ A server identity key's lifetime might come to an early or unexpected end, perhaps due to some sort of leak of the corresponding private key. When this happens, the server should generate a new -identity key pair and broadcast the -[`SERVER_KEY_CHANGE`](https://http.cat/404) gateway event -to all clients. Clients must request new ID-Certs through a CSR. Should a client be offline at the time -of the key change, it must be informed of the change upon reconnection. +identity key pair and broadcast the [`SERVER_KEY_CHANGE`](https://http.cat/404) gateway event to all +clients. Clients must request new ID-Certs through a CSR. Should a client be offline at the time of +the key change, it must be informed of the change upon reconnection. #### 6.1.4 Early revocation of ID-Certs @@ -1371,10 +1388,10 @@ It is common for systems relying on X.509 certificates for user authentication t Revocation Lists (CRLs) to keep track of which certificates are no longer valid. This is done to prevent a user from using a certificate that has been revoked. -CRLs are difficult to implement well, often requiring many resources to keep up to date, and -are also not always reliable. OCSP (Online Certificate Status Protocol) is a more modern, reliable -and easier to implement alternative. Still, it potentially requires many resources to -keep up with demand while introducing potential privacy concerns. 
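Stepping back briefly to the certificate request flow: the sketch below ties together the pDN
requirements from section 6.1.1.1 and the ID-CSR flow from section 6.1.3. It is a non-normative
illustration using the Python `cryptography` package and the example actor `xenia@example.com`; a
real implementation would additionally encode the session ID as an `Ia5String` and request the
extensions required by section 6.1.1.2.

```python
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.x509.oid import NameOID

# OID 0.9.2342.19200300.100.1.44 ("uniqueIdentifier") carries the session ID in a pDN.
UNIQUE_IDENTIFIER = x509.ObjectIdentifier("0.9.2342.19200300.100.1.44")


def build_id_csr(private_key: Ed25519PrivateKey, session_id: str) -> x509.CertificateSigningRequest:
    """Assemble an ID-CSR for the example actor xenia@example.com (illustrative only)."""
    subject = x509.Name(
        [
            x509.NameAttribute(NameOID.COMMON_NAME, "xenia"),          # local name of the actor
            x509.NameAttribute(NameOID.DOMAIN_COMPONENT, "example"),   # dc components of the
            x509.NameAttribute(NameOID.DOMAIN_COMPONENT, "com"),       # home server's domain
            x509.NameAttribute(NameOID.USER_ID, "xenia@example.com"),  # UID = federation ID
            x509.NameAttribute(UNIQUE_IDENTIFIER, session_id),         # uniqueIdentifier = session ID
        ]
    )
    # Ed25519 keys are signed without a separate digest algorithm, hence `None`.
    return x509.CertificateSigningRequestBuilder().subject_name(subject).sign(private_key, None)
```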
+CRLs are difficult to implement well, often requiring many resources to keep up to date, and are +also not always reliable. OCSP (Online Certificate Status Protocol) is a more modern, reliable and +easier to implement alternative. Still, it potentially requires many resources to keep up with +demand while introducing potential privacy concerns. polyproto inherently mitigates some of the possible misuse of a revoked certificate, as the validity of a certificate is usually checked by many parties. In particular, the revocation process is @@ -1397,19 +1414,19 @@ When an ID-Cert is revoked, the server must revoke the session associated with t Revoking an ID-Cert is considered a [sensitive action](#42-sensitive-actions) and therefore should require a second factor of authentication. -{/*TODO*/} +{/_TODO_/} :::bug "TODO" The following questions are still open: - Should actors always be able to revoke the ID-Cert they are sending the revocation message with - without needing to complete a sensitive action? + without needing to complete a sensitive action? - Currently, I cannot see any reason that would speak against this. - How can actors remain in control of their keys? If revocations need to be signed by the server, - then the server has more authority over keys than the actor does - - Revocations should likely never have to be signed by the server. Either that, or it does, - but the [trust model assumptions](#2-trust-model) apply. + then the server has more authority over keys than the actor does + - Revocations should likely never have to be signed by the server. Either that, or it does, but + the [trust model assumptions](#2-trust-model) apply. ::: @@ -1425,41 +1442,42 @@ revoking an ID-Cert are the same regardless of the server type. :::info[Revocation detection] -For information on how revocation detection is supposed to be handled, see [section 6.4](#64-caching-of-id-certs). +For information on how revocation detection is supposed to be handled, see +[section 6.4](#64-caching-of-id-certs). ::: ### 6.2 Actor identity keys and message signing -As briefly mentioned in section [#4](#4-federated-identity), users must hold on to an identity key pair -at all times. This key pair is used to represent an actor's identity and to verify -message integrity by having an actor sign all messages they send with their -private identity key. The key pair is generated by the actor. An actor-generated identity key -certificate signing request (CSR) is sent to the actor's home server when first connecting to the -server with a new session or when rotating keys. The key is stored in the client's local storage. -Upon receiving a new identity key CSR, a home server will sign this CSR and send the resulting ID-Cert -to the client. This certificate is proof that the home server attests to the client's key. Read +As briefly mentioned in section [#4](#4-federated-identity), users must hold on to an identity key +pair at all times. This key pair is used to represent an actor's identity and to verify message +integrity by having an actor sign all messages they send with their private identity key. The key +pair is generated by the actor. An actor-generated identity key certificate signing request (CSR) is +sent to the actor's home server when first connecting to the server with a new session or when +rotating keys. The key is stored in the client's local storage. Upon receiving a new identity key +CSR, a home server will sign this CSR and send the resulting ID-Cert to the client. 
This certificate +is proof that the home server attests to the client's key. Read [section 6.1](#61-home-server-signed-certificates-for-public-client-identity-keys-id-cert) for more information about the certificate. -The private key from the key pair that the server has generated an ID-Cert for will be used to create -digital signatures for the contents of all messages sent by this session. This digital signature must -be attached to the message itself so that other actors can verify the integrity of the message -contents. +The private key from the key pair that the server has generated an ID-Cert for will be used to +create digital signatures for the contents of all messages sent by this session. This digital +signature must be attached to the message itself so that other actors can verify the integrity of +the message contents. :::info -polyproto does not define what messages themselves look like, apart from this hard requirement. -The format of a message is up to polyproto extensions and implementations to define. +polyproto does not define what messages themselves look like, apart from this hard requirement. The +format of a message is up to polyproto extensions and implementations to define. ::: #### 6.2.1 Message verification -To ensure message integrity through signing, clients and servers must verify -message signatures. This involves cross-checking the message signature against the sender's -ID-Cert and the sender's home server's ID-Cert while also confirming the validity of the -ID-Cert attached to the message and ensuring its public key matches the sender's. +To ensure message integrity through signing, clients and servers must verify message signatures. +This involves cross-checking the message signature against the sender's ID-Cert and the sender's +home server's ID-Cert while also confirming the validity of the ID-Cert attached to the message and +ensuring its public key matches the sender's. :::info @@ -1470,10 +1488,9 @@ signatures and [weak public keys](https://en.wikipedia.org/wiki/Weak_key) must b :::tip[Example] -Say we have two actors. Alice, who is registered on Server A, and Bob, who is registered -on Server B. Alice and Bob **are having a conversation on Server B**. Given a signed message from -Alice, such as Bob would receive from Server B, the process of verifying the signature would look -like this: +Say we have two actors. Alice, who is registered on Server A, and Bob, who is registered on Server +B. Alice and Bob **are having a conversation on Server B**. Given a signed message from Alice, such +as Bob would receive from Server B, the process of verifying the signature would look like this: ```mermaid sequenceDiagram @@ -1499,26 +1516,25 @@ end b->>b: Verify signature of Alice's message (Fig. 4) ``` -*Fig. 3: Sequence diagram of a successful message signature verification.* +_Fig. 3: Sequence diagram of a successful message signature verification._ ::: :::note -You should read about the details of ID-Cert lookup load distribution via caching and why -Bob should first try to request Alice's certificate from Server B instead of Alice's home -server (Server A) in the [corresponding section of this protocol specification](#64-caching-of-id-certs). -Understanding both sections is crucial for building secure, scalable, and compliant -implementations of polyproto. 
+You should read about the details of ID-Cert lookup load distribution via caching and why Bob should +first try to request Alice's certificate from Server B instead of Alice's home server (Server A) in +the [corresponding section of this protocol specification](#64-caching-of-id-certs). Understanding +both sections is crucial for building secure, scalable, and compliant implementations of polyproto. ::: :::info -A failed signature verification does not always mean that the message is invalid. It may be that -the actor's identity key has changed, and that Server B has not yet received the new public -identity key for some reason. However, if the signature cannot be verified at a certain time, -this information must be communicated to the actor performing the verification. +A failed signature verification does not always mean that the message is invalid. It may be that the +actor's identity key has changed, and that Server B has not yet received the new public identity key +for some reason. However, if the signature cannot be verified at a certain time, this information +must be communicated to the actor performing the verification. ::: @@ -1527,17 +1543,17 @@ this information must be communicated to the actor performing the verification. In the context of federation with other federation protocols, such as ActivityPub, it is possible for actors to receive messages that do not have a signature attached to them. If a P2 extension explicitly allows for this, it is possible for a polyproto server to forward such messages to -clients. If a P2 extension does not explicitly allow for this, both servers and clients must -reject such messages. +clients. If a P2 extension does not explicitly allow for this, both servers and clients must reject +such messages. Before a polyproto server forwards such a message to clients, it must add an "external" property to the message object. If possible in the data format used, this property should be set to a boolean -value of `true` or a value that can be interpreted in an equivalent manner. -This property must be passed along to the client or clients receiving the message. +value of `true` or a value that can be interpreted in an equivalent manner. This property must be +passed along to the client or clients receiving the message. If the actor receiving this external message is human or otherwise sentient, the client application -should inform the actor that the message is external and that the message has not been signed by -the sender. External messages should be distinguishable from signed messages at first glance, especially +should inform the actor that the message is external and that the message has not been signed by the +sender. External messages should be distinguishable from signed messages at first glance, especially when viewed through a client application. ### 6.3 Private key loss prevention and private key recovery @@ -1546,18 +1562,18 @@ As described in previous sections, actors must hold on to their past identity ke want or need to migrate their account. Home servers must offer a way for actors to upload and recover their private identity keys while not -having access to the private keys themselves. Private identity keys must be encrypted with -strong passphrases and encryption schemes such as AES before being uploaded to the server. -Authenticated actors can download their encrypted private identity keys from the server at any time. -All encryption and decryption operations must be done client-side. 
+having access to the private keys themselves. Private identity keys must be encrypted with strong +passphrases and encryption schemes such as AES before being uploaded to the server. Authenticated +actors can download their encrypted private identity keys from the server at any time. All +encryption and decryption operations must be done client-side. If any uncertainty about the availability of the home server exists, clients should regularly download their encrypted private identity keys from the server and store them in a secure location. Ideally, each client should immediately download their encrypted private identity keys from the server after connecting. Clients must never store key backups in an unencrypted manner. -Whether an actor uploads their encrypted private identity keys to the server is their own choice. -It is also recommended to back up the encrypted private identity keys in some other secure location. +Whether an actor uploads their encrypted private identity keys to the server is their own choice. It +is also recommended to back up the encrypted private identity keys in some other secure location. The APIs for managing encrypted private identity keys are documented in the [API documentation](https://apidocs.polyproto.org). @@ -1567,24 +1583,25 @@ The APIs for managing encrypted private identity keys are documented in the Actors can make use of the [migration APIs](#7-migrations) to reduce the number of ID-Certs/keys that they must hold on to to migrate their account in the future. -For example, if an actor currently has messages signed with 20 different ID-Certs but only uses -2 clients (meaning that the actor always needs two active ID-Certs—one for each client), -the 18 outdated/unused ID-Certs could be consolidated into one ID-Cert through [re-signing the messages](#72-re-signing-messages) -made with the outdated ID-Certs with any other ID-Cert. +For example, if an actor currently has messages signed with 20 different ID-Certs but only uses 2 +clients (meaning that the actor always needs two active ID-Certs—one for each client), the 18 +outdated/unused ID-Certs could be consolidated into one ID-Cert through +[re-signing the messages](#72-re-signing-messages) made with the outdated ID-Certs with any other +ID-Cert. :::warning -This drastically reduces the number of ID-Certs the actor needs to keep track of and hold on -to, which may make re-signing messages in the future easier. +This drastically reduces the number of ID-Certs the actor needs to keep track of and hold on to, +which may make re-signing messages in the future easier. -However, doing this also introduces additional risks, as the overwhelming majority of the -actor's message history is now associated with one ID-Cert. **An accidental leak of the -private identity key of that ID-Cert could likely not be recovered from,** since all associated -messages are potentially under control by those who know the private identity key. +However, doing this also introduces additional risks, as the overwhelming majority of the actor's +message history is now associated with one ID-Cert. **An accidental leak of the private identity key +of that ID-Cert could likely not be recovered from,** since all associated messages are potentially +under control by those who know the private identity key. 
-Actors and polyproto software developers must keep this information in mind, should -consider whether the risks and benefits of this strategy are worth it for their use case and -can introduce additional strategies to manage the number of "relevant" private keys safely. +Actors and polyproto software developers must keep this information in mind, should consider whether +the risks and benefits of this strategy are worth it for their use case and can introduce additional +strategies to manage the number of "relevant" private keys safely. ::: @@ -1592,13 +1609,13 @@ can introduce additional strategies to manage the number of "relevant" private k ### 6.4 Caching of ID-Certs -The caching of ID-Certs is an important mechanism in polyproto to aid in fairly distributing the load -generated by ID-Cert lookups to the servers generating the traffic, not to the server the ID-Cert -is actually from. This practice should help make the operation of low-resource home servers, used -exclusively for hosting identities, more viable. +The caching of ID-Certs is an important mechanism in polyproto to aid in fairly distributing the +load generated by ID-Cert lookups to the servers generating the traffic, not to the server the +ID-Cert is actually from. This practice should help make the operation of low-resource home servers, +used exclusively for hosting identities, more viable. -This section of the protocol definition defines required behaviors related to the correct caching -of ID-Certs for both home servers and clients. +This section of the protocol definition defines required behaviors related to the correct caching of +ID-Certs for both home servers and clients. To make this section more understandable, we will bring back the example from section 6.2.1: @@ -1606,10 +1623,9 @@ To make this section more understandable, we will bring back the example from se :::tip[Example] -Say we have two actors. Alice, who is registered on Server A, and Bob, who is registered -on Server B. Alice and Bob **are having a conversation on Server B**. Given a signed message -from Alice, such as Bob would receive from Server B, the process of verifying the signature -would look like this: +Say we have two actors. Alice, who is registered on Server A, and Bob, who is registered on Server +B. Alice and Bob **are having a conversation on Server B**. Given a signed message from Alice, such +as Bob would receive from Server B, the process of verifying the signature would look like this: ```mermaid sequenceDiagram @@ -1635,45 +1651,44 @@ end b->>b: Verify signature of Alice's message (Fig. 4) ``` -*Fig. 3: Sequence diagram of a successful message signature verification.* +_Fig. 3: Sequence diagram of a successful message signature verification._ ::: In the case where `alice@server-a.example.com` and `bob@server-b.example.com` are having a -conversation where the communications server is any server other than `server-a.example.com`, -Bob should request Alice's ID-Cert from that server first, instead of from `server-a.example.com`. +conversation where the communications server is any server other than `server-a.example.com`, Bob +should request Alice's ID-Cert from that server first, instead of from `server-a.example.com`. :::: :::note[Further notes on why we consider this cached distribution process a good idea] -Bob's client could request Alice's public identity key from Server A, instead of Server B. -However, this is discouraged, as it +Bob's client could request Alice's public identity key from Server A, instead of Server B. 
However, +this is discouraged, as it -- Generates unnecessary load on Server A; Doing it this way distributes the load of public - identity key requests more fairly, as the server that the message was sent on is the one that - has to process the bulk of public identity certificate requests. -- Would expose unnecessary metadata to Server A; Server A does not need to know who exactly - Alice is talking to, and when. Only Server B, Alice, and Bob need to know this information. - Always requesting the public identity key from Server A might expose this information to - Server A. +- Generates unnecessary load on Server A; Doing it this way distributes the load of public identity + key requests more fairly, as the server that the message was sent on is the one that has to + process the bulk of public identity certificate requests. +- Would expose unnecessary metadata to Server A; Server A does not need to know who exactly Alice is + talking to, and when. Only Server B, Alice, and Bob need to know this information. Always + requesting the public identity key from Server A might expose this information to Server A. -Clients should only use Server A as a fallback for public identity key verification if Server B -does not respond to the request for Alice's public identity key, or if the verification fails -with the public identity key from Server B. Security considerations listed in this section of -the protocol definition ensure that this cached distribution process is safe and trustworthy. +Clients should only use Server A as a fallback for public identity key verification if Server B does +not respond to the request for Alice's public identity key, or if the verification fails with the +public identity key from Server B. Security considerations listed in this section of the protocol +definition ensure that this cached distribution process is safe and trustworthy. ::: Both Bob's client and Server B should now cache Server A's and Alice's ID-Certs to avoid having to request them again. -The TTL (time to live) of these cached items should be relatively short. Recommended values -are between one (1) and twelve (12) hours. Cached ID-Certs must be evicted from -the cache after the TTL has expired. Expired cached ID-Certs must not be used for signature -verification of new messages, even if the client cannot renew its cache. All of this applies to both -servers and clients. The TTL for a certificate's cache duration is dictated by the home server -that certificate has been issued by. You can read more on that in +The TTL (time to live) of these cached items should be relatively short. Recommended values are +between one (1) and twelve (12) hours. Cached ID-Certs must be evicted from the cache after the TTL +has expired. Expired cached ID-Certs must not be used for signature verification of new messages, +even if the client cannot renew its cache. All of this applies to both servers and clients. The TTL +for a certificate's cache duration is dictated by the home server that certificate has been issued +by. You can read more on that in [subsection 1 of this section](#641-verifying-that-a-newly-retrieved-id-cert-is-not-out-of-date). ::::question[Why not select longer-lived TTLs for cached ID-Certs?] @@ -1687,24 +1702,23 @@ victim "Alice": :::tip[Downside of using higher values for a TTL] 1. One of Alice's private identity keys is compromised. -2. 
Malicious actor Eve logs onto Server X, which Alice has never connected to before, using - Alice's ID-Cert, of which the corresponding private identity key has been compromised. -3. In the meantime, Alice notices the breach, requesting the revocation of her ID-Cert on - all servers she is connected to. -4. Server X does not get this revocation message, as Alice does not know about her connection - to Server X, where Eve is impersonating Alice. -5. Eve can now impersonate Alice on Server X for as long as the TTL of the cached ID-Cert on - Server X has not expired. With a high value, this could be a long time. +2. Malicious actor Eve logs onto Server X, which Alice has never connected to before, using Alice's + ID-Cert, of which the corresponding private identity key has been compromised. +3. In the meantime, Alice notices the breach, requesting the revocation of her ID-Cert on all + servers she is connected to. +4. Server X does not get this revocation message, as Alice does not know about her connection to + Server X, where Eve is impersonating Alice. +5. Eve can now impersonate Alice on Server X for as long as the TTL of the cached ID-Cert on Server + X has not expired. With a high value, this could be a long time. ::: :::: -If the verification fails, Bob's client should try to re-request the key from Server B first. -Should the verification fail again, Bob's client can try to request Alice's public identity key -and ID-Cert from Server A (Alice's home server). The signature verification process should then be -retried. Should the verification still not succeed, the message should be treated with extreme -caution. +If the verification fails, Bob's client should try to re-request the key from Server B first. Should +the verification fail again, Bob's client can try to request Alice's public identity key and ID-Cert +from Server A (Alice's home server). The signature verification process should then be retried. +Should the verification still not succeed, the message should be treated with extreme caution. ```mermaid sequenceDiagram @@ -1734,9 +1748,9 @@ else Verification succeeds end ``` -*Fig. 4: Sequence diagram showing how message verification should be handled if the first attempt -to verify the signature fails, continuing the example of a conversation happening on a server -"B" between Bob from a random server and Alice from server A* +_Fig. 4: Sequence diagram showing how message verification should be handled if the first attempt to +verify the signature fails, continuing the example of a conversation happening on a server "B" +between Bob from a random server and Alice from server A_ After evicting a cached ID-Cert: @@ -1746,45 +1760,47 @@ After evicting a cached ID-Cert: :::info -It is *not* of vital importance that a client requests an ID-Cert of an actor whose ID-Cert has -just been evicted from the cache from the server, where the actor was last seen by the client -*precisely*. This means that a client application doesn't necessarily need to update an internal +It is _not_ of vital importance that a client requests an ID-Cert of an actor whose ID-Cert has just +been evicted from the cache from the server, where the actor was last seen by the client +_precisely_. This means that a client application doesn't necessarily need to update an internal state of where that actor has last been seen every single time that actor sends a message somewhere. -This internal state update could instead happen every 5, 30, or even 60 seconds. 
What *is* -important, however, is that this state update does eventually happen within a reasonable amount -of time, to help achieve the goal of dynamic server load distribution. +This internal state update could instead happen every 5, 30, or even 60 seconds. What _is_ +important, however, is that this state update does eventually happen within a reasonable amount of +time, to help achieve the goal of dynamic server load distribution. ::: #### 6.4.1 Verifying that a newly retrieved ID-Cert is not out of date While the goal of achieving dynamic server load distribution to increase the viability of small, -low-resource home servers is a noble one, this goal must not undermine [P2s trust model](#2-trust-model), -which other aspects of the protocol work very hard to uphold. Retrieving ID-Certs from a middleman -introduces a new attack surface that must be mitigated. Consider the following example: +low-resource home servers is a noble one, this goal must not undermine +[P2s trust model](#2-trust-model), which other aspects of the protocol work very hard to uphold. +Retrieving ID-Certs from a middleman introduces a new attack surface that must be mitigated. +Consider the following example: :::tip[Example attack abusing blind middleman trust] 1. One of Alice's private identity keys is compromised. 2. Malicious actor Eve logs onto a malicious Server X, which is controlled by Eve, impersonating - Alice by using Alice's ID-Cert, of which the corresponding private identity key has been compromised. -3. In the meantime, Alice notices the breach, requesting the revocation of her ID-Cert on - all servers she is connected to. + Alice by using Alice's ID-Cert, of which the corresponding private identity key has been + compromised. +3. In the meantime, Alice notices the breach, requesting the revocation of her ID-Cert on all + servers she is connected to. 4. Server X does not care for this revocation message, as it is malicious (attacker controlled). 5. Eventually, the TTL for this compromised certificate expires. Users on Server X contact the - server for the latest certificate of Alice. + server for the latest certificate of Alice. 6. Server X responds with the compromised ID-Cert, claiming that this is the most up-to-date - ID-Cert, even though it has been revoked. -7. Through all users trusting Server X blindly, Eve and Server X can impersonate Alice for as - long as Alice's compromised ID-Cert would have been valid for (valid-not-after attribute in X.509 - certificates). Until then, users do not notice that this certificate has been revoked and - should no longer be valid. + ID-Cert, even though it has been revoked. +7. Through all users trusting Server X blindly, Eve and Server X can impersonate Alice for as long + as Alice's compromised ID-Cert would have been valid for (valid-not-after attribute in X.509 + certificates). Until then, users do not notice that this certificate has been revoked and should + no longer be valid. ::: This kind of attack mentioned above has been considered and mitigated in polyproto. This mitigation -is achieved through API behaviors enabling the fetching of actor ID-Certs with additional information -attached to the response body. The additional information is structured as follows: +is achieved through API behaviors enabling the fetching of actor ID-Certs with additional +information attached to the response body. 
The additional information is structured as follows: | Field name | JSON type | Actual type (if different from JSON type) | Description | | --------------------- | --------- | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -1794,13 +1810,13 @@ attached to the response body. The additional information is structured as follo | `invalidatedAt` | String? | Unsigned 64-bit integer | If present, represents a UNIX timestamp at which the certificate was [invalidated](#614-early-revocation-of-id-certs) on. Certificate was not prematurely invalidated if not present. | A server generates the `cacheSignature` by concatenating the serial number of the ID-Cert in -question with the `cacheValidNotBefore` timestamp, the `cacheValidNotAfter` timestamp, and the `invalidatedAt` -timestamp, if present. +question with the `cacheValidNotBefore` timestamp, the `cacheValidNotAfter` timestamp, and the +`invalidatedAt` timestamp, if present. :::warning -The order in which the concatenation operations are executed is important and must be adhered -to. The order is as follows: +The order in which the concatenation operations are executed is important and must be adhered to. +The order is as follows: `cacheSignature ⋅ cacheValidNotBefore ⋅ cacheValidNotAfter ⋅ (invalidatedAt|"")¹` @@ -1808,9 +1824,8 @@ to. The order is as follows: ::: -The resulting string is signed using the home servers private identity key. -Clients must reject certificates of which the `cacheSignature` can not be verified to be -correct. +The resulting string is signed using the home servers private identity key. Clients must reject +certificates of which the `cacheSignature` can not be verified to be correct. :::note[Note/Fun fact] @@ -1829,10 +1844,11 @@ Concatenation operations are not commutative. :::note[Definition: Concatenation] > In formal language theory and computer programming, string concatenation is the operation of -joining character strings end-to-end. For example, the concatenation of "snow" and "ball" is -"snowball". +> joining character strings end-to-end. For example, the concatenation of "snow" and "ball" is +> "snowball". -*From Wikipedia, The Free Encyclopedia. [Source](https://en.wikipedia.org/w/index.php?title=Concatenation&oldid=1266032132#:~:text=In%20formal%20language,a%20primitive%20notion.)* +_From Wikipedia, The Free Encyclopedia. +[Source](https://en.wikipedia.org/w/index.php?title=Concatenation&oldid=1266032132#:~:text=In%20formal%20language,a%20primitive%20notion.)_ ::: @@ -1844,45 +1860,46 @@ caching to be used without conflicting with the [trust model](#2-trust-model) of :::info[Scenarios requiring cache and validity verification] -**Only** the following scenarios **must require** a server to retrieve, validate and supply invalidation -and cache information about a foreign actor's ID-Cert: +**Only** the following scenarios **must require** a server to retrieve, validate and supply +invalidation and cache information about a foreign actor's ID-Cert: - **Sending messages:** Before a foreign actor is allowed to send any messages on the server. This - automatically applies again if the ID-Cert is changed through any means. + automatically applies again if the ID-Cert is changed through any means. 
- **ID-Cert request:** When the server receives a request for a foreign actor's ID-Cert, the server - must fetch and validate invalidation and cache information about the foreign actor's ID-Cert before - completing the request. + must fetch and validate invalidation and cache information about the foreign actor's ID-Cert + before completing the request. ::: :::info[Scenarios **not** requiring cache and validity verification] -The following scenarios **must explicitly not require** a server to retrieve, verify or supply invalidation -and cache information about a foreign actor's ID-Cert: +The following scenarios **must explicitly not require** a server to retrieve, verify or supply +invalidation and cache information about a foreign actor's ID-Cert: -- **Requesting a challenge string:** When a foreign actor requests a challenge string from the server. +- **Requesting a challenge string:** When a foreign actor requests a challenge string from the + server. - **Requesting a key trial:** When a foreign actor requests a key trial from the server. - **Completing a key trial:** When a foreign actor completes a key trial from the server. - **Re-signing messages request:** When a foreign actor requests to re-sign messages on the server. -- **Re-signing messages abortion request:** When a foreign actor requests to abort the re-signing - of messages on the server. +- **Re-signing messages abortion request:** When a foreign actor requests to abort the re-signing of + messages on the server. - **Re-signing messages commitment:** When a foreign actor commits re-signed messages to the server. -- **Re-signing messages commitment:** When a foreign actor fetches messages to-be re-signed from - the server. -- **Requesting a redirect:** When a foreign ("new") actor asks the server of the "old" server to - set up a redirect to the "new" actor. +- **Re-signing messages commitment:** When a foreign actor fetches messages to-be re-signed from the + server. +- **Requesting a redirect:** When a foreign ("new") actor asks the server of the "old" server to set + up a redirect to the "new" actor. - **Key trial information request:** When an actor requests information about completed key trials - from the foreign actor. + from the foreign actor. - **Requesting a home server ID-Cert:** When an actor requests the ID-Cert of a home server — an - action that can only be performed by asking the home server in question directly, that ID-Cert - mustn't contain cache and validity information. Since home server ID-Certs are self-signed, - cache and validity information would not benefit anyone. + action that can only be performed by asking the home server in question directly, that ID-Cert + mustn't contain cache and validity information. Since home server ID-Certs are self-signed, cache + and validity information would not benefit anyone. ::: polyproto implementation must not require cache and validity verification on any route not specified -in the above information block, except if a [p2-extension](#8-protocol-extensions-p2-extensions) states -otherwise. +in the above information block, except if a [p2-extension](#8-protocol-extensions-p2-extensions) +states otherwise. ### 6.5 Cryptographic specifications @@ -1892,18 +1909,19 @@ However, certificates and messages must be made available with Ed25519 signature ### 6.6 Best practices -The following subsections are dedicated to documenting best practices to consider when -implementing polyproto. 
+The following subsections are dedicated to documenting best practices to consider when implementing +polyproto. #### 6.6.1 Signing keys and ID-Certs - When a server is asked to generate a new ID-Cert for an actor, it must make sure that the CSR is - valid and, if set, has an expiry date less than or equal to the expiry date of the server's own ID-Cert. + valid and, if set, has an expiry date less than or equal to the expiry date of the server's own + ID-Cert. - Due to the fact that a `SERVER_KEY_CHANGE` gateway event is bound to generate a large amount of traffic, servers should only manually generate a new identity key pair when absolutely necessary - and instead select a fitting expiry date interval for their ID-Certs. It might - also be a good idea to stagger the sending of `SERVER_KEY_CHANGE` gateway events to prevent a - server from initiating a DDoS attack on itself. + and instead select a fitting expiry date interval for their ID-Certs. It might also be a good idea + to stagger the sending of `SERVER_KEY_CHANGE` gateway events to prevent a server from initiating a + DDoS attack on itself. - When a client or server receives the information that an actor's client identity key has been changed, the client/server in question should update their cached ID-Cert for the actor in question, taking into account the session ID of the new identity key pair. @@ -1915,8 +1933,8 @@ implementing polyproto. #### 6.6.3 Private key loss prevention and private key recovery -- It is a good idea for home servers to limit the upload size and available upload slots for encrypted - private identity keys. +- It is a good idea for home servers to limit the upload size and available upload slots for + encrypted private identity keys. ## 7. Migrations @@ -1928,18 +1946,18 @@ to a new identity. This allows actors to switch home servers while not losing ow sent by them. Message migration allows actors to move messages from one service provider to another in a -tamper-resistant way. This makes it possible for actors to switch service providers, taking some -or all of their messages with them. Which messages can be moved is up to P2 extensions to define, -as it might not always be possible to move all messages. Some messages might be tied to a -specific context, which is unavailable on the new server. +tamper-resistant way. This makes it possible for actors to switch service providers, taking some or +all of their messages with them. Which messages can be moved is up to P2 extensions to define, as it +might not always be possible to move all messages. Some messages might be tied to a specific +context, which is unavailable on the new server. :::tip[Example: Information tied to a specific context] In a chat application, there might exist a group chat with a lot of people in it. Moving your messages from this group chat to another server might be impossible, depending on the architecture -of the chat application. Typically, the messages in a group chat are stored on the server -hosting the group. Moving the messages of one individual from one server to another is not -possible in these cases. +of the chat application. Typically, the messages in a group chat are stored on the server hosting +the group. Moving the messages of one individual from one server to another is not possible in these +cases. ::: @@ -1947,26 +1965,25 @@ possible in these cases. 
:::tip[Example: Information not necessarily tied to a specific context] -Continuing the chat application example, it might very well be possible to move messages -written in a private chat between two actors from one server to another. An exemplary -architecture where this is possible is where all private messages are stored on the server of -the actor who sent the message. Here, an actor can move their messages to another server without -any issues. +Continuing the chat application example, it might very well be possible to move messages written in +a private chat between two actors from one server to another. An exemplary architecture where this +is possible is where all private messages are stored on the server of the actor who sent the +message. Here, an actor can move their messages to another server without any issues. ::: Migrating an actor always involves reassigning the ownership of all actor-associated data in the distributed network to the new actor. Should the old actor want to additionally move all data from -the old home server to another home server, more steps are needed. Account migration is not considered -a sensitive action. +the old home server to another home server, more steps are needed. Account migration is not +considered a sensitive action. This chapter defines behaviors and security mechanisms associated with migrating an actor identity or messages. ### 7.1 Identity migration -Transferring message ownership from an old to a new account, known as -identity migration, necessitates coordination between the two involved accounts. +Transferring message ownership from an old to a new account, known as identity migration, +necessitates coordination between the two involved accounts. Identity migration is a process that can be broken down into the following steps: @@ -1980,17 +1997,17 @@ to which extent they wish to perform the migration. #### 7.1.1 Redirects -Setting up a redirect is an optional step in the identity migration process, helping -make the transition from the old account to the new account smoother. +Setting up a redirect is an optional step in the identity migration process, helping make the +transition from the old account to the new account smoother. -A redirect has to be confirmed by both the redirection source and the redirection target. The redirect -is only valid for one specific redirection target. Redirection targets must be valid actors, and their -home servers must be reachable when the redirect is being set up. +A redirect has to be confirmed by both the redirection source and the redirection target. The +redirect is only valid for one specific redirection target. Redirection targets must be valid +actors, and their home servers must be reachable when the redirect is being set up. :::info -"Optional" does not mean that home servers can choose to not implement this feature. Instead, -it means that actors can choose to not use this feature. +"Optional" does not mean that home servers can choose to not implement this feature. Instead, it +means that actors can choose to not use this feature. ::: @@ -2025,11 +2042,11 @@ else end ``` -*Fig. 5: Sequence diagram depicting the setting up of a redirect.* +_Fig. 5: Sequence diagram depicting the setting up of a redirect._ Until a redirection source actor deletes their account, the home server of that actor should respond -with `307 Temporary Redirect` to requests for information about the redirection source. 
After -the redirection source deletes their account, Server A can select to either respond with +with `307 Temporary Redirect` to requests for information about the redirection source. After the +redirection source deletes their account, Server A can select to either respond with `308 Permanent Redirect`, or to remove the redirect entirely. ### 7.2 Re-signing messages @@ -2039,16 +2056,16 @@ the content of the messages unchanged. "Transparently" refers to the fact that a verify the following facts: - Both involved actors have agreed to the re-signing of the messages. -- The "old" actor has proven ownership of the signature keys used to produce the "old" signatures - of the messages. +- The "old" actor has proven ownership of the signature keys used to produce the "old" signatures of + the messages. - The message content has not changed during the re-signing process. The intended use cases for re-signing messages are: - Changing ownership of messages from one actor to another. This enables seamless transitions between accounts while preserving the integrity of the messages. -- Reducing the amount of keys that need to be remembered by an actor is done if the actor deems it to - be convenient. +- Reducing the amount of keys that need to be remembered by an actor is done if the actor deems it + to be convenient. - "Rotate keys of past messages" - This is useful when an actor's private identity key has been compromised, and the actor wants to ensure that all messages sent by them are still owned by them and not at risk of being tampered with. @@ -2062,8 +2079,8 @@ Additionally, servers must verify the following things about re-signed messages: server. - The ID-Cert corresponding to the new signature has a public key that was specified in the `allowedResigningKeys` property sent to the server when message re-signing was requested. -- The `expires` UNIX timestamp, specified when the server replied to the re-signing request, - has not been reached or passed when the re-signed message was received by the server. +- The `expires` UNIX timestamp, specified when the server replied to the re-signing request, has not + been reached or passed when the re-signed message was received by the server. Below is a sequence diagram depicting a typical re-signing process, which transfers ownership of messages from Alice A to Alice B. @@ -2098,16 +2115,16 @@ end ``` To allow for a singular set of behaviors, which fit the three intended use cases mentioned prior, -not all messages stored by the server of an actor need to be re-signed. -Besides querying for all non-re-signed messages, actors can also query for all non-resigned -message whose signatures correspond to a specific ID-Cert. The API routes -for re-signing messages are documented in the [API documentation](https://apidocs.polyproto.org). +not all messages stored by the server of an actor need to be re-signed. Besides querying for all +non-re-signed messages, actors can also query for all non-resigned message whose signatures +correspond to a specific ID-Cert. The API routes for re-signing messages are documented in the +[API documentation](https://apidocs.polyproto.org). #### 7.2.1 Message batches -Messages that have not yet been re-signed are being delivered to an actor in batches. A batch is -a JSON object, representing messages sent using the same ID-Cert. An -exemplary array of message batches, as returned by the server, might look as follows: +Messages that have not yet been re-signed are being delivered to an actor in batches. 
A batch is a +JSON object, representing messages sent using the same ID-Cert. An exemplary array of message +batches, as returned by the server, might look as follows: ```json [ @@ -2155,45 +2172,44 @@ the client. ##### 7.2.2.1 Body size -Servers can limit the size of an HTTP request body containing re-signed messages. -If a body size limit is imposed, the server must communicate -this to clients in their response to a query for messages that have not yet been re-signed. -Communicating the body size limit is done by adding an `X-P2-Return-Body-Size-Limit` header to the -response. If this header is not present or has a value of `0`, clients should assume that there is -no body size limit. +Servers can limit the size of an HTTP request body containing re-signed messages. If a body size +limit is imposed, the server must communicate this to clients in their response to a query for +messages that have not yet been re-signed. Communicating the body size limit is done by adding an +`X-P2-Return-Body-Size-Limit` header to the response. If this header is not present or has a value +of `0`, clients should assume that there is no body size limit. ##### 7.2.2.2 Interval between re-signing batches -Servers must define an interval, which a client must wait for before sending a new batch of re-signed -messages to the server. +Servers must define an interval, which a client must wait for before sending a new batch of +re-signed messages to the server. The server communicates this interval to the client as a response to receiving a batch of re-signed -messages from the client. The interval is communicated by adding a -`Retry-After` header to the response. The value of this header is a 16-bit integer. The integer -represents a delay in seconds that a client must wait for before sending the next batch of re-signed -messages. +messages from the client. The interval is communicated by adding a `Retry-After` header to the +response. The value of this header is a 16-bit integer. The integer represents a delay in seconds +that a client must wait for before sending the next batch of re-signed messages. Clients should expect that the duration of the interval changes between batches. The server can -dynamically adjust the duration that a client must wait before being allowed to send the next -batch of re-signed messages. The server can also select to not impose an interval between re-signing +dynamically adjust the duration that a client must wait before being allowed to send the next batch +of re-signed messages. The server can also select to not impose an interval between re-signing batches. Clients should also expect that the server suddenly decides to impose an interval between re-signing batches, even if it has not done so before. -If this header has a value of `0`, clients should assume that there is no interval -between re-signing batches. +If this header has a value of `0`, clients should assume that there is no interval between +re-signing batches. -*Fig. 7: Sequence diagram depicting the re-signing procedure.* +_Fig. 7: Sequence diagram depicting the re-signing procedure._ ### 7.3 Moving data -In cases of an imminent server shutdown or distrust in the old server, moving data from the old server -is necessary to prevent data loss. +In cases of an imminent server shutdown or distrust in the old server, moving data from the old +server is necessary to prevent data loss. Note that only ["static" resources](#example-static-information) can be moved. 
"Dynamic" resources, -which are resources tied to a specific context, can be migrated through [re-signing messages](#72-re-signing-messages). +which are resources tied to a specific context, can be migrated through +[re-signing messages](#72-re-signing-messages). -This process extends upon the reassigning ownership process and -usually involves the following steps: +This process extends upon the reassigning ownership process and usually involves the following +steps: 1. Using the old account, the client requests a data export from their old home server. 2. The old home server sends a data export to the client. The client will check the signatures on @@ -2224,7 +2240,7 @@ sb->>ab: Data import successful aa-xsa: Deactivate account ``` -*Fig. 8: Sequence diagram depicting the data-moving process.* +_Fig. 8: Sequence diagram depicting the data-moving process._ The API routes for data export and import are documented in the [API documentation](https://apidocs.polyproto.org) @@ -2238,107 +2254,112 @@ layer of indirection. :::tip[Example] In a chat service, a user might have posted a message containing a picture. In this example, the -picture is stored on the user's home server, which is not necessarily the same server as the -chat service. If the user moves their account to another server, the picture might not be -accessible anymore. +picture is stored on the user's home server, which is not necessarily the same server as the chat +service. If the user moves their account to another server, the picture might not be accessible +anymore. ::: -Resource addressing with relative roots aids in preventing this issue. Instead of referring to -the absolute URL of the resource, the server processing the resource generates a unique identifier. -This identifier can be used to retrieve the resource from the server. Most importantly, this -identifier does not change when the resource is moved to another server. If the base domain of the -new server is known, the identifier can be used to retrieve the resource from the new server. -The "relative root" is the base domain of the server, which is used to retrieve the resource. +Resource addressing with relative roots aids in preventing this issue. Instead of referring to the +absolute URL of the resource, the server processing the resource generates a unique identifier. This +identifier can be used to retrieve the resource from the server. Most importantly, this identifier +does not change when the resource is moved to another server. If the base domain of the new server +is known, the identifier can be used to retrieve the resource from the new server. The "relative +root" is the base domain of the server, which is used to retrieve the resource. -The uniqueness constraint of the identifier is important. If a collision occurs when trying to -move the resource to another server, the resource cannot be migrated in a way that preserves the -references to it. One way to ensure the uniqueness of the identifier is to use a hash function on the -resource itself. Combining this hash with a cryptographically strong nonce, then hashing the result of -concatenating the nonce and the hash of the resource should yield a unique identifier. +The uniqueness constraint of the identifier is important. If a collision occurs when trying to move +the resource to another server, the resource cannot be migrated in a way that preserves the +references to it. One way to ensure the uniqueness of the identifier is to use a hash function on +the resource itself. 
Combining this hash with a cryptographically strong nonce, then hashing the
+result of concatenating the nonce and the hash of the resource should yield a unique identifier.

The URI for resource addressing with relative roots is formatted as follows:

`/.p2/core/resource/`

-Uploaded resources can be made private, and access to them can be controlled via allow- and deny lists,
-specifying access properties for each individual resource. Individual actors and entire instances can
-be part of these allow- and deny lists. Marking a resource as private restricts access to only the
-uploader and the actors and instances that are part of the allow list. APIs and JSON schemas
-associated with access control are part of the [API documentation](https://apidocs.polyproto.org).
+Uploaded resources can be made private, and access to them can be controlled via allow- and deny
+lists, specifying access properties for each individual resource. Individual actors and entire
+instances can be part of these allow- and deny lists. Marking a resource as private restricts access
+to only the uploader and the actors and instances that are part of the allow list. APIs and JSON
+schemas associated with access control are part of the
+[API documentation](https://apidocs.polyproto.org).

The API routes for resource addressing with relative roots are documented more thoroughly in the
[API documentation](https://apidocs.polyproto.org).

Servers with no need for resource addressing with relative roots can select to not implement this
-feature. Servers not implementing this feature should return a `404 Not Found` status code when
-the API route is accessed. Clients should expect finding servers not implementing this feature.
+feature. Servers not implementing this feature should return a `404 Not Found` status code when the
+API route is accessed. Clients should expect to find servers not implementing this feature.

#### 7.3.2 polyproto export/import format

Data exports and
-imports must use the polyproto export/import format. Home servers are required to support this format when actors perform data exports and imports.
-The data is a [gzipped](https://en.wikipedia.org/wiki/Gzip) [tarball](https://en.wikipedia.org/wiki/Tar_(computing))
-archive (.tar.gz) named `export1234567890-user@subdomain.example.com`, where
+The data is a [gzipped](https://en.wikipedia.org/wiki/Gzip)
+[tarball](https://en.wikipedia.org/wiki/Tar_(computing)) archive (.tar.gz) named
+`export1234567890-user@subdomain.example.com`, where

- `export[numbers]` is the word `export` with 20 random digits appended to it
- `user` is the actor's name
- `subdomain.example.com` is the domain name of the server the actor is registered on.

-This file archive contains a file `messages.p2mb`, which is a JSON file containing [message batches](#721-message-batches)
-of all messages sent by the user. If the server where the data export was requested from has
-[RawR](#731-resource-addressing-with-relative-roots) enabled, the file archive will contain a
-folder named `rawr`. This folder contains all RawR content uploaded by the actor to that server.
-The files in this folder are named after the resource ID given to the resource.
-File extensions are only added if they were known to the server.
+This file archive contains a file `messages.p2mb`, which is a JSON file containing
+[message batches](#721-message-batches) of all messages sent by the user.
If the server where the +data export was requested from has [RawR](#731-resource-addressing-with-relative-roots) enabled, the +file archive will contain a folder named `rawr`. This folder contains all RawR content uploaded by +the actor to that server. The files in this folder are named after the resource ID given to the +resource. File extensions are only added if they were known to the server. :::tip[Example] An example file name might be -`2c851bfb6daffa944fa1723c7bd4d362ffbc9defe292f2daaf05e895989d179b.jxl`, referencing the file -which was hosted at `/.p2/core/resource/2c851bfb6daffa944fa1723c7bd4d362ffbc9defe292f2daaf05e895989d179b.jxl`. +`2c851bfb6daffa944fa1723c7bd4d362ffbc9defe292f2daaf05e895989d179b.jxl`, referencing the file which +was hosted at +`/.p2/core/resource/2c851bfb6daffa944fa1723c7bd4d362ffbc9defe292f2daaf05e895989d179b.jxl`. ::: -In addition, the folder `rawr` contains a file named `access_properties.p2al`. This JSON -file contains a data structure mapping each resource ID to an access properties object. In particular, -the file is structured as an array containing objects. Each object has a key that is equal -to the resource ID of a resource in the `rawr` directory and a value that is an object -representing the access properties. An example of the contents of this file is given below: +In addition, the folder `rawr` contains a file named `access_properties.p2al`. This JSON file +contains a data structure mapping each resource ID to an access properties object. In particular, +the file is structured as an array containing objects. Each object has a key that is equal to the +resource ID of a resource in the `rawr` directory and a value that is an object representing the +access properties. An example of the contents of this file is given below: :::tip[Example of an `access_properties.p2al` file] ```json [ { - "2062a23e2a25b226ca4c546fec5ec06e0df9648281f45da8b5aaabebdf66cf4c.jxl": { - "private": false, - "allowlist": ["user1@example.com", "instance.example.com"], - "denylist": ["user2@example.com", "otherinstance@example.com"] - } + "2062a23e2a25b226ca4c546fec5ec06e0df9648281f45da8b5aaabebdf66cf4c.jxl": { + "private": false, + "allowlist": ["user1@example.com", "instance.example.com"], + "denylist": ["user2@example.com", "otherinstance@example.com"] + } }, { - "a9144379a161e1fcf6b07801b70db6d6c481933bd634fe2409eb713723ab1a0a": { - "private": true, - "allowlist": ["user1@example.com"], - "denylist": [] - } + "a9144379a161e1fcf6b07801b70db6d6c481933bd634fe2409eb713723ab1a0a": { + "private": true, + "allowlist": ["user1@example.com"], + "denylist": [] + } } ] ``` ::: -If the server where the data export was requested from is the actor's home server, the -archive will contain a folder `certs` and a file `crypt_certs.p2epk`. +If the server where the data export was requested from is the actor's home server, the archive will +contain a folder `certs` and a file `crypt_certs.p2epk`. The folder `certs` contains all ID-Certs the server has stored of the actor. The ID-Certs are stored -in [ASCII PEM format](https://web.archive.org/web/20250107131731/https://learn.microsoft.com/en-us/azure/iot-hub/reference-x509-certificates#:~:text=ASN.1%20encoding.-,ascii%20pem%20format,-A%20PEM%20certificate). +in +[ASCII PEM format](https://web.archive.org/web/20250107131731/https://learn.microsoft.com/en-us/azure/iot-hub/reference-x509-certificates#:~:text=ASN.1%20encoding.-,ascii%20pem%20format,-A%20PEM%20certificate). 
-The file `crypt_certs.p2epk` contains all [encrypted private key material](#63-private-key-loss-prevention-and-private-key-recovery) -that the actor has uploaded to the server. Just like `messages.p2mb`, `crypt_certs.p2epk` is a standard -JSON file. +The file `crypt_certs.p2epk` contains all +[encrypted private key material](#63-private-key-loss-prevention-and-private-key-recovery) that the +actor has uploaded to the server. Just like `messages.p2mb`, `crypt_certs.p2epk` is a standard JSON +file. ### 7.4 Challenges and trust @@ -2355,19 +2376,18 @@ re-signed. :::tip[Example] -In the case of a social media platform with quote-posting functionality, it is reasonable to -assume that re-signing a quoted post is allowed. However, this would likely change the -signature of the quoted post, which would be undesirable. Edge cases like these are up to -implementations to handle and should be well documented. +In the case of a social media platform with quote-posting functionality, it is reasonable to assume +that re-signing a quoted post is allowed. However, this would likely change the signature of the +quoted post, which would be undesirable. Edge cases like these are up to implementations to handle +and should be well documented. ::: ## 8. Protocol extensions (P2 extensions) -polyproto leaves room for extensions, outsourcing concepts such as concrete -message types to protocol extensions. This allows for a more flexible -core protocol, which can be adapted to a wide variety of use cases. The following sections -define: +polyproto leaves room for extensions, outsourcing concepts such as concrete message types to +protocol extensions. This allows for a more flexible core protocol, which can be adapted to a wide +variety of use cases. The following sections define: - protocol extensions, also called P2 extensions - how protocol extensions interact with the core protocol @@ -2375,40 +2395,40 @@ define: ### 8.1 Extension design -P2 extensions *should* be either of the following: +P2 extensions _should_ be either of the following: -- a **major** technological addition, which can be taken advantage of -by other extensions. Examples of this are: - - a unified WebSocket Gateway connection scheme - - Message Layer Encryption (MLS) - - Compatibility with other protocols (e.g., Matrix, ActivityPub) +- a **major** technological addition, which can be taken advantage of by other extensions. Examples + of this are: + - a unified WebSocket Gateway connection scheme + - Message Layer Encryption (MLS) + - Compatibility with other protocols (e.g., Matrix, ActivityPub) - a definition of a [service](#9-services). Examples of this are: - - A federated chat application - - A federated social media platform + - A federated chat application + - A federated social media platform -A good P2 extension should never be both at the same time. If a P2 extension is both a -major technological addition and a document describing a particular application use case, it should -likely be split into two separate extensions. +A good P2 extension should never be both at the same time. If a P2 extension is both a major +technological addition and a document describing a particular application use case, it should likely +be split into two separate extensions. Designing P2 extensions, which only specify a single route or a small set of behavior changes, is -discouraged. Instead, these should be implemented as part of a larger extension, which offers a -more comprehensive set of features. +discouraged. 
Instead, these should be implemented as part of a larger extension, which offers a more +comprehensive set of features. :::note -If you are, say, developing a polyproto server implementation with a feature that is not part of -the default polyproto specification, you do not have to create a P2 extension for this feature. -P2 extensions are useful for defining interoperable services, which can be implemented by a variety -of servers and clients. +If you are, say, developing a polyproto server implementation with a feature that is not part of the +default polyproto specification, you do not have to create a P2 extension for this feature. P2 +extensions are useful for defining interoperable services, which can be implemented by a variety of +servers and clients. ::: ### 8.2 Namespaces A namespace is a string used to identify a specific P2 extension. Used as a prefix in URLs, they -prevent route name collisions between different extensions. Namespaces should be unique -and descriptive. They must only contain lowercase letters, numbers, hyphens, and underscores. -Namespaces must be at least 2 characters long and at most 64 characters long. +prevent route name collisions between different extensions. Namespaces should be unique and +descriptive. They must only contain lowercase letters, numbers, hyphens, and underscores. Namespaces +must be at least 2 characters long and at most 64 characters long. Officially endorsed P2 extensions have priority over selecting namespaces. If a namespace is already taken by an officially endorsed extension, a different namespace must be chosen. If a namespace @@ -2423,8 +2443,8 @@ Officially endorsed extensions are extensions that either: - have been developed by the maintainers themselves - have been developed by a third party and are now maintained by the polyproto maintainers -Contact the polyphony-chat maintainers at [info@polyphony.chat](mailto:info@polyphony.chat) -if you want to have your extension officially endorsed. +Contact the polyphony-chat maintainers at [info@polyphony.chat](mailto:info@polyphony.chat) if you +want to have your extension officially endorsed. Officially endorsed extensions must fulfill all the requirements listed in [section 8](#8-protocol-extensions-p2-extensions). @@ -2441,9 +2461,9 @@ made to the extension. The only exception to this rule is when marking an extens #### 8.4.1 Yanking -Yanking an extension means that the extension is no longer supported and that it **should not** be used. -A later version of the extension should be used instead. Yanked extension versions should prominently -display the "yanked" status next to the version number in the extension's documentation. +Yanking an extension means that the extension is no longer supported and that it **should not** be +used. A later version of the extension should be used instead. Yanked extension versions should +prominently display the "yanked" status next to the version number in the extension's documentation. Versions of officially endorsed P2 extensions can normally not be removed, only marked as yanked. @@ -2464,41 +2484,40 @@ The following syntax is used for indicating the version number of a dependency: When selecting a version number for a dependency, the highest possible version number that fulfills the requirements should be selected. -The name of the dependency, along with the version number, is to be listed right beneath the extension's -version declaration in the extension's documentation. 
Ideally, a link to the dependencies' specification -document should be included. +The name of the dependency, along with the version number, is to be listed right beneath the +extension's version declaration in the extension's documentation. Ideally, a link to the +dependencies' specification document should be included. -To grow the ecosystem of interoperable [services](#9-services), it is encouraged to first develop -a generic version of that service, which acts as a shared base for all implementations. This shared +To grow the ecosystem of interoperable [services](#9-services), it is encouraged to first develop a +generic version of that service, which acts as a shared base for all implementations. This shared base can then be extended with the exact, non-service-critical features that are needed for a specific implementation. -For example, a generic, federated chat service extension might offer routes for adding -reactions to chat messages. However, a route for adding reactions with full-screen animation effects -would be better suited as an implementation-specific detail. +For example, a generic, federated chat service extension might offer routes for adding reactions to +chat messages. However, a route for adding reactions with full-screen animation effects would be +better suited as an implementation-specific detail. If possible for the given use case, P2 extensions should depend on and extend already existing, officially endorsed P2 extensions. :::tip[Example] -Say, you are developing a social chat platform using polyproto. In this example, you would like -your chat platform to have a feature, which is not part of the officially endorsed -`polyproto-chat` extension. Instead of developing a new extension from scratch, your chat -extension should likely depend on `polyproto-chat` and define only this new feature as part of -your own extension. +Say, you are developing a social chat platform using polyproto. In this example, you would like your +chat platform to have a feature, which is not part of the officially endorsed `polyproto-chat` +extension. Instead of developing a new extension from scratch, your chat extension should likely +depend on `polyproto-chat` and define only this new feature as part of your own extension. ::: -Doing this ensures a high level of interoperability across all different implementations of a specific -application group. +Doing this ensures a high level of interoperability across all different implementations of a +specific application group. ### 8.6 Routes -Polyproto extensions must never change, add, or remove routes defined by the extension they depend on. -Instead, routes with alternating or new behavior must be added under a newly defined namespace, which -must differ from the original namespace. Changing the behavior of existing routes breaks compatibility -with other implementations of the same extension. +Polyproto extensions must never change, add, or remove routes defined by the extension they depend +on. Instead, routes with alternating or new behavior must be added under a newly defined namespace, +which must differ from the original namespace. Changing the behavior of existing routes breaks +compatibility with other implementations of the same extension. Route paths must always start with `.p2/`, followed by the extensions' namespace. Namespaces are explained in [section 8.2](#82-namespaces). @@ -2507,14 +2526,14 @@ explained in [section 8.2](#82-namespaces). 
:::info -A "service" is any application-specific implementation of polyproto, defined by a P2 extension. -All services are P2 extensions, but not all P2 extensions are services. +A "service" is any application-specific implementation of polyproto, defined by a P2 extension. All +services are P2 extensions, but not all P2 extensions are services. ::: -Actors can use their identity to register with any server hosting polyproto services, such as polyproto-chat. -These servers can be the actors' home server, but can also be foreign servers. There is no limitation -to how many services any given actor can register with and what these services are. +Actors can use their identity to register with any server hosting polyproto services, such as +polyproto-chat. These servers can be the actors' home server, but can also be foreign servers. There +is no limitation to how many services any given actor can register with and what these services are. Application-specific implementations of polyproto should consider that users of their service might also want to register for services offered by other servers, using the same identity. @@ -2522,26 +2541,26 @@ also want to register for services offered by other servers, using the same iden ## 9.1 Discoverability The discoverability feature allows users who are registered with the same service but on different -servers to communicate with each other. The actor initiating the communication only needs to know the -federation ID of the actor they want to communicate with. Consider the following example: +servers to communicate with each other. The actor initiating the communication only needs to know +the federation ID of the actor they want to communicate with. Consider the following example: ::::tip[Example: Discovering services] :::info -The example below is simplified for the sake of clarity. In a real-world scenario, Alice -and the chat server would perform the foreign server authentication procedure described in -[section 4.1.1](#411-authenticating-on-a-foreign-server) before Alice can send a -chat message to Bob. The example also uses a simplified example of how polyproto-chat works. +The example below is simplified for the sake of clarity. In a real-world scenario, Alice and the +chat server would perform the foreign server authentication procedure described in +[section 4.1.1](#411-authenticating-on-a-foreign-server) before Alice can send a chat message to +Bob. The example also uses a simplified example of how polyproto-chat works. ::: -Alice and Bob want to communicate with each other. Both Alice and Bob are registered on servers -that host the polyproto-chat service. However, Alice and Bob are not registered on the same -server, and they do not share any chat rooms. Alice types in Bob's federation ID into her -chat client. The client then queries Bob's home server to find out which server Bob uses -for the polyproto-chat service. Alice's client can then send the chat message to Bob's server, -which will forward the chat message to Bob. +Alice and Bob want to communicate with each other. Both Alice and Bob are registered on servers that +host the polyproto-chat service. However, Alice and Bob are not registered on the same server, and +they do not share any chat rooms. Alice types in Bob's federation ID into her chat client. The +client then queries Bob's home server to find out which server Bob uses for the polyproto-chat +service. Alice's client can then send the chat message to Bob's server, which will forward the chat +message to Bob. 
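To make the flow above concrete, here is a rough client-side sketch of the lookup-then-send
sequence. It is illustrative only: the discovery endpoint path, query parameters, and response shape
used below are invented for this example and are not taken from the polyproto API documentation,
which remains the authoritative source for the actual routes.

```typescript
// Illustrative sketch only — the endpoint paths and response shape below are
// hypothetical and do not correspond to documented polyproto API routes.
interface ServiceProviderResponse {
  baseUrl: string // base URL of the server Bob uses for the queried service
}

async function sendViaDiscoveredProvider(
  homeServer: string, // e.g. "https://bobs-home-server.example"
  recipientFid: string, // Bob's federation ID
  service: string, // e.g. "polyproto-chat"
  message: string
): Promise<void> {
  // 1. Ask Bob's home server which provider Bob has registered for this service.
  const lookup = await fetch(
    `${homeServer}/.p2/example/discovery?actor=${encodeURIComponent(recipientFid)}&service=${encodeURIComponent(service)}`
  )
  if (!lookup.ok) throw new Error(`Service discovery failed: ${lookup.status}`)
  const { baseUrl } = (await lookup.json()) as ServiceProviderResponse

  // 2. Send the message to the discovered provider, which forwards it to Bob.
  await fetch(`${baseUrl}/.p2/example/messages`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ to: recipientFid, content: message }),
  })
}
```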
```mermaid sequenceDiagram @@ -2558,33 +2577,32 @@ aa->>sc: Message to Bob sc->>ab: Forward message from Alice to Bob ``` -*Fig. 9: Sequence diagram depicting how Alice's client discovers which server Bob is using for -the exemplary polyproto-chat service.* +_Fig. 9: Sequence diagram depicting how Alice's client discovers which server Bob is using for the +exemplary polyproto-chat service._ The example demonstrates how Alice can communicate with Bob, even though they do not share any servers. :::: -To be discoverable, an actor must add a key-value pair to their home server's database. The -key is the name of the service, and the value is the base URL of the server hosting the service. +To be discoverable, an actor must add a key-value pair to their home server's database. The key is +the name of the service, and the value is the base URL of the server hosting the service. The API routes for managing discoverability are documented in the [API documentation](https://apidocs.polyproto.org) ### 9.1.1 Changing a primary service provider -Keys are unique in the actor-scoped service->service-provider table. Actors wanting -to register for two or more different implementations of the same service must select which -service provider to use as a "primary service provider" for that service. +Keys are unique in the actor-scoped service->service-provider table. Actors wanting to register for +two or more different implementations of the same service must select which service provider to use +as a "primary service provider" for that service. -If the actor is human, clients must not override the existing -key-value pair silently. Instead, clients must either ask the actor to confirm the change or -not change the key-value pair. +If the actor is human, clients must not override the existing key-value pair silently. Instead, +clients must either ask the actor to confirm the change or not change the key-value pair. -Changing a primary service provider entry is considered a sensitive action and should -require a second factor of authentication. +Changing a primary service provider entry is considered a sensitive action and should require a +second factor of authentication. -Messages do not get moved or re-signed when changing the primary -service provider for a given service. If an actor wants to move their messages to the new primary -service provider, they must request a [migration](#7-migrations). +Messages do not get moved or re-signed when changing the primary service provider for a given +service. If an actor wants to move their messages to the new primary service provider, they must +request a [migration](#7-migrations). diff --git a/docs/protocols/mls.md b/docs/protocols/mls.md index 1ca2217..aa2b99e 100644 --- a/docs/protocols/mls.md +++ b/docs/protocols/mls.md @@ -2,8 +2,123 @@ **Namespace:** `mls` -**Version:** `v1.0-alpha.1` +**Version:** `v1.0-alpha.2` **Base Path:** `/.p2/mls/v1/` -The polyproto-mls extension defines how Messaging Layer Security (MLS) integrates with polyproto. +[Semantic versioning v2.0.0](https://semver.org/spec/v2.0.0.html) is used to version this +specification. + +The polyproto-mls extension defines how Messaging Layer Security (herein after referred to as its +commonly used acronym, MLS) integrates with polyproto. The basis of this document is +[IETF RFC 9420](https://www.rfc-editor.org/rfc/rfc9420.html). + +## 1. Cryptographic Suites + +This section lists pre-defined cryptographic suites that can be implemented by polyproto-mls +compatible implementations. 
+
+| Name | Numeric Identifier (MLS Numeric Value) | Quantum-Secure? | Recommended? | Required? |
+| ----------- | -------------------------------------- | --------------- | ------------ | --------- |
+| MLS default | `0x0001` | No | Yes | Yes |
+| Hybrid PQC | `0xF110` | Yes | Yes | Yes |
+
+### 1.1. MLS default cipher suite
+
+In accordance with section #6.5 of the core polyproto specification document and section #17.1 of RFC
+9420, the cipher suite `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` must be implemented by
+polyproto-mls implementations. This cipher suite uses Curve25519 for key exchange,
+[AES-128-GCM](https://en.wikipedia.org/wiki/Galois/Counter_Mode) for
+[HPKE](https://www.rfc-editor.org/rfc/rfc9180.html),
+[HKDF]()
+over SHA2-256, and Ed25519 for signatures.
+
+| MLS Cipher Suite Component | Component | Quantum-Secure? |
+| -------------------------- | ----------- | --------------- |
+| Key Exchange | Curve25519 | No |
+| HPKE | AES-128-GCM | Partially |
+| HKDF | SHA2-256 | Partially |
+| Signatures | Ed25519 | No |
+
+### 1.2. Hybrid PQC cipher suite
+
+polyproto-mls v1 offers a
+[Post-Quantum cryptography (PQC)](https://en.wikipedia.org/wiki/Post-quantum_cryptography) cipher
+suite in order to resist possible cryptanalytic attacks by a quantum computer on encrypted messages
+shared via polyproto-mls.
+
+:::info
+
+The MLS working group is considering adoption of the draft document
+[ML-KEM and Hybrid Cipher Suites for MLS](https://datatracker.ietf.org/doc/html/draft-mahy-mls-pq).
+Until official recommendations are made regarding eligible, well-suited cipher suites for hybrid,
+Post-Quantum/Traditional authenticity and confidentiality guarantees, polyproto-mls may either
+employ its own temporary cipher suite or leave this particular section unfinished.
+
+:::
+
+## 2. Home Server Responsibilities
+
+Services aiming to be compliant with polyproto-mls v1 must take on the roles of Authentication
+Service (AS) and Delivery Service (DS) in the context of Messaging Layer Security.
+
+:::note[Quote from the MLS Architecture Document:]
+
+> An Authentication Service (AS), which is responsible for attesting to bindings between
+> application-meaningful identifiers and the public key material used for authentication in the MLS
+> protocol. The AS must also be able to generate credentials that encode these bindings and validate
+> credentials provided by MLS clients.
A Delivery Service (DS), which can receive and +> distribute messages between group members. In the case of group messaging, the DS may also be +> responsible for acting as a "broadcaster" where the sender sends a single message which is then +> forwarded to each recipient in the group by the DS. The DS is also responsible for storing and +> delivering initial public key material required by MLS clients in order to proceed with the group +> secret key establishment that is part of the MLS protocol. + +_[Source](https://messaginglayersecurity.rocks/mls-architecture/draft-ietf-mls-architecture.html#name-abstract-services)_ + +::: + +## 3. HTTP APIs + +polyproto-mls provides a set of versioned HTTP APIs, accessible via the `mls` namespace. The HTTP +APIs provide functionality required by the MLS specification for polyproto-mls implementers to also +be compliant MLS implementations. + +The HTTP APIs offered through polyproto-mls are fully documented as OpenAPI specification documents +for OpenAPI versions 3.0.0 and 3.1.0. The OpenAPI specification documents can be found +[at this link](https://github.com/polyphony-chat/typespec-openapi/tree/main/build). Select the +preferred OpenAPI version and find the versioned `.yaml` files in the corresponding folder. + +## 4. WebSocket Gateway + +The polyproto-mls extension specification also registers a namespace `mls` for the WebSocket +protocol defined in section #3 of the polyproto-core specification. As usual for WebSockets, this +provides optional, real-time bidirectional communication functionality for implementations that +require it. + +Servers **must** offer the `mls` namespace for the WebSocket Gateway and the functionality described +by this chapter. Clients may subscribe to the `mls` WebSocket Gateway Service Channel by sending a +subscribe event with the appropriate contents to the polyproto-mls WebSocket Gateway server. + +:::tip[Example MLS subscribe payload] + +```json +{ + "n": "core", + "op": 8, + "d": { + "action": "subscribe", + "service": "mls" + } +} +``` + +::: + +### 4.1. 
Gateway Event Opcodes + +The following opcodes are defined by the `mls` namespace: + +| Opcode | Name | Action | Description | +| ------ | ---- | ------ | ----------- | +| `TBD` | TBD | TBD | TBD | diff --git a/docusaurus.config.ts b/docusaurus.config.ts index 8dd2d42..4ded224 100644 --- a/docusaurus.config.ts +++ b/docusaurus.config.ts @@ -116,14 +116,14 @@ const config: Config = { } satisfies Preset.ThemeConfig, markdown: { mermaid: true, + format: "md", }, themes: ["@docusaurus/theme-mermaid"], stylesheets: [ { href: "https://cdn.jsdelivr.net/npm/katex@0.13.24/dist/katex.min.css", type: "text/css", - integrity: - "sha384-odtC+0UGzzFL/6PNoE8rX/SPcQDXBJ+uRepguP4QkPCm2LBxH3FA3y+fKSiJ+AmM", + integrity: "sha384-odtC+0UGzzFL/6PNoE8rX/SPcQDXBJ+uRepguP4QkPCm2LBxH3FA3y+fKSiJ+AmM", crossorigin: "anonymous", }, ], diff --git a/package.json b/package.json index c12af87..8a1f991 100644 --- a/package.json +++ b/package.json @@ -1,5 +1,5 @@ { - "name": "polyproto-docs", + "name": "polyproto-dot-org", "version": "0.0.0", "private": true, "license": "MPL-2.0", diff --git a/plugins/tailwind-config.cjs b/plugins/tailwind-config.cjs index f2b1775..ea46fef 100644 --- a/plugins/tailwind-config.cjs +++ b/plugins/tailwind-config.cjs @@ -1,15 +1,15 @@ function tailwindPlugin(context, options) { return { - name: 'tailwind-plugin', - configurePostCss(postcssOptions) { - postcssOptions.plugins = [ - require('postcss-import'), - require('tailwindcss'), - require('autoprefixer'), - ]; - return postcssOptions; - }, - }; - } - - module.exports = tailwindPlugin; \ No newline at end of file + name: "tailwind-plugin", + configurePostCss(postcssOptions) { + postcssOptions.plugins = [ + require("postcss-import"), + require("tailwindcss"), + require("autoprefixer"), + ] + return postcssOptions + }, + } +} + +module.exports = tailwindPlugin diff --git a/sidebars.ts b/sidebars.ts index 2897139..96033dd 100644 --- a/sidebars.ts +++ b/sidebars.ts @@ -1,4 +1,4 @@ -import type {SidebarsConfig} from '@docusaurus/plugin-content-docs'; +import type { SidebarsConfig } from "@docusaurus/plugin-content-docs" // This runs in Node.js - Don't use client-side code here (browser APIs, JSX...) @@ -13,11 +13,11 @@ import type {SidebarsConfig} from '@docusaurus/plugin-content-docs'; Create as many sidebars as you want. */ const sidebars: SidebarsConfig = { - // By default, Docusaurus generates a sidebar from the docs folder structure - tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + // By default, Docusaurus generates a sidebar from the docs folder structure + tutorialSidebar: [{ type: "autogenerated", dirName: "." }], - // But you can create a sidebar manually - /* + // But you can create a sidebar manually + /* tutorialSidebar: [ 'intro', 'hello', @@ -28,6 +28,6 @@ const sidebars: SidebarsConfig = { }, ], */ -}; +} -export default sidebars; +export default sidebars diff --git a/src/components/About.tsx b/src/components/About.tsx index c7c2293..b9492b5 100644 --- a/src/components/About.tsx +++ b/src/components/About.tsx @@ -1,79 +1,84 @@ -import Wave from "react-wavify"; -import { Anchor } from "./Anchor"; +import Wave from "react-wavify" +import { Anchor } from "./Anchor" export function About(): JSX.Element { - return ( -
-
- -
-
- -
-
-
-
-
-

- What is{" "} - - polyproto - {" "} -
all about? -

-

- What is{" "} - - polyproto - {" "} - all about? -

+ return ( +
+
+
-
- Diagram showing digital signature verification flow: YOU send signed message to FOREIGN SERVER, which forwards to OTHER USER, who verifies signature via HOME SERVER. +
+
-

- polyproto is all about decentralized, federated data exchange using tried and true - concepts. Designed to be flexible, polyproto can fit almost any usecase. The best part? - The user remains in control. No more losing friends and followers if your old homeserver - sunsets. Feel free to migrate to another homeserver at any time, even if your old homeserver - has shut down ages ago. -

-
- - learn more - +
+
+
+
+

+ What is{" "} + + polyproto + {" "} +
all about? +

+

+ What is{" "} + + polyproto + {" "} + all about? +

+
+
+ Diagram showing digital signature verification flow: YOU send a signed message to FOREIGN SERVER, which forwards it to OTHER USER, who verifies the signature via HOME SERVER. +
+

+ polyproto is all about decentralized, federated data exchange using + tried and true concepts. Designed to be flexible, polyproto can fit + almost any use case. The best part? The user remains in control. No more + losing friends and followers if your old homeserver sunsets. Feel free + to migrate to another homeserver at any time, even if your old + homeserver has shut down ages ago. +

+
+ + learn more + +
+
+
+
+ +
+
+
-
-
-
- -
-
-
-
-
- ); + + ) } diff --git a/src/components/Anchor.tsx b/src/components/Anchor.tsx index 23b6cc0..429e9d1 100644 --- a/src/components/Anchor.tsx +++ b/src/components/Anchor.tsx @@ -1,14 +1,15 @@ -import classNames from 'classnames' +import classNames from "classnames" -const ANCHOR_STYLES = "border border-poly-green-1 bg-poly-black px-4 py-2 text-poly-green-1 rounded-md text-center hover:no-underline hover:text-poly-green-1 flex justify-center items-center" +const ANCHOR_STYLES = + "border border-poly-green-1 bg-poly-black px-4 py-2 text-poly-green-1 rounded-md text-center hover:no-underline hover:text-poly-green-1 flex justify-center items-center" export function Anchor(props: { - className: string, - href: string, - children: React.ReactNode, - }): JSX.Element { + className: string + href: string + children: React.ReactNode +}): JSX.Element { return ( - - {props.children} - - ); - } \ No newline at end of file + + {props.children} + + ) +} diff --git a/src/components/GetStarted.tsx b/src/components/GetStarted.tsx index 9551c6d..efe95fa 100644 --- a/src/components/GetStarted.tsx +++ b/src/components/GetStarted.tsx @@ -65,46 +65,34 @@ function EmojiParticles({ trigger }: { trigger: boolean }) { return (
- {particles.map( - ({ - id, - emoji, - offsetX, - offsetY, - delay, - initialX, - initialY, - }) => ( - - setParticles((prev) => - prev.filter((p) => p.id !== id) - ) - } - className="absolute text-xl" - > - {emoji} - - ) - )} + {particles.map(({ id, emoji, offsetX, offsetY, delay, initialX, initialY }) => ( + + setParticles((prev) => prev.filter((p) => p.id !== id)) + } + className="absolute text-xl" + > + {emoji} + + ))}
) } @@ -174,10 +162,11 @@ export function GetStarted(): JSX.Element { > {/* Rainbow ring layer */} {/* Emoji particles behind text */} {hovered && !isReducedMotion && ( diff --git a/src/components/Hero.tsx b/src/components/Hero.tsx index 500b70f..f920078 100644 --- a/src/components/Hero.tsx +++ b/src/components/Hero.tsx @@ -1,36 +1,41 @@ -import { Anchor } from "./Anchor"; -import ArrowRight from "./icons/ArrowRight"; +import { Anchor } from "./Anchor" +import ArrowRight from "./icons/ArrowRight" export function Hero(): JSX.Element { - return ( -
-
-
-
-
-

- A refreshingly simple
- decentralised, federated protocol -

-

- Transforming -

+ return ( +
+
+
+
+
+

+ A refreshingly simple
+ decentralised, federated protocol +

+

+ Transforming +

+
+
+

+ online +

+

+  communication +

+
+
+ + Get Started + + +
+
+
-
-

- online -

-

 communication

-
-
- - Get Started - - -
-
-
-
-
- ); + + ) } diff --git a/src/components/Product.tsx b/src/components/Product.tsx index 9240520..2ede264 100644 --- a/src/components/Product.tsx +++ b/src/components/Product.tsx @@ -1,163 +1,164 @@ export function Product(): JSX.Element { - return ( -
-
-
-
-
-

- Not just different. Better! -

-
-
-
-
-
-
- - - - - - - - - - - - - - - - - - -
-

- Your digital home, - wherever you go. -

-

- Switch home servers even if your home server is offline, while - making sure that your data stays in your control. -

-
-
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - + return ( +
+
+
+
+
+

+ Not just different. Better! +

+
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + +
+

+ Your digital home, + wherever you go. +

+

+ Switch home servers even if your home server is offline, while + making sure that your data stays under your control. +

+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+

+ Tamper-Proof + Messages +

+

+ polyproto produces tamper-resistant data and messages through the use of + digital signatures. Trust that what you see has not been altered along + the way. +

+
+
+
-

- Tamper-Proof - Messages -

-

- Produces tamper-resistant data/messages through the use of signatures. Trust that - what you see has not been altered along the way. -

-
-
-
-
-
- ); + + ) } diff --git a/src/components/icons/ArrowRight.tsx b/src/components/icons/ArrowRight.tsx index a25aaa5..0c65ac3 100644 --- a/src/components/icons/ArrowRight.tsx +++ b/src/components/icons/ArrowRight.tsx @@ -1,18 +1,18 @@ -import React from "react"; +import React from "react" function Icon(props: { className: string }) { - return ( - - - - ); + return ( + + + + ) } -export default Icon; +export default Icon diff --git a/src/css/custom.css b/src/css/custom.css index 8d7edda..7cfffeb 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -32,10 +32,10 @@ [class*="clean-btn"]::after, [class*="clean-btn"]::before, .breadcrumbs__item::after { - /* + /* Some icons are apparently done using a `background:`, as such applying i.e `color:` is not possible. Definitely a hack. - */ + */ filter: invert(100)!important; } code { @@ -90,16 +90,16 @@ code { } .breadcrumbs__link { - /* + /* for some reason, "sub" is centered here, instead of "middle". - */ + */ vertical-align: sub; } .breadcrumbs__item--active .breadcrumbs__link { /* Again, I don't know what the FUCK is happening here but it's aligned now - */ + */ vertical-align: super; } diff --git a/src/pages/index.module.css b/src/pages/index.module.css index 9f71a5d..b1fb774 100644 --- a/src/pages/index.module.css +++ b/src/pages/index.module.css @@ -4,20 +4,20 @@ */ .heroBanner { - padding: 4rem 0; - text-align: center; - position: relative; - overflow: hidden; + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; } @media screen and (max-width: 996px) { - .heroBanner { - padding: 2rem; - } + .heroBanner { + padding: 2rem; + } } .buttons { - display: flex; - align-items: center; - justify-content: center; + display: flex; + align-items: center; + justify-content: center; } diff --git a/src/pages/index.tsx b/src/pages/index.tsx index b1a8306..cb96a12 100644 --- a/src/pages/index.tsx +++ b/src/pages/index.tsx @@ -1,25 +1,25 @@ -import Layout from "@theme/Layout"; -import type { ReactNode } from "react"; +import Layout from "@theme/Layout" +import type { ReactNode } from "react" -import { About } from "../components/About"; -import { Contribute } from "../components/Contribute"; -import { GetStarted } from "../components/GetStarted"; -import { Hero } from "../components/Hero"; -import { Product } from "../components/Product"; +import { About } from "../components/About" +import { Contribute } from "../components/Contribute" +import { GetStarted } from "../components/GetStarted" +import { Hero } from "../components/Hero" +import { Product } from "../components/Product" export default function Home(): ReactNode { - return ( - -
- - - - - -
-
- ); + return ( + +
+ + + + + +
+
+ ) } diff --git a/src/theme/Footer/Copyright/index.tsx b/src/theme/Footer/Copyright/index.tsx index fcfc8b3..65ff269 100644 --- a/src/theme/Footer/Copyright/index.tsx +++ b/src/theme/Footer/Copyright/index.tsx @@ -1,13 +1,13 @@ -import React, {type ReactNode} from 'react'; -import type {Props} from '@theme/Footer/Copyright'; +import React, { type ReactNode } from "react" +import type { Props } from "@theme/Footer/Copyright" -export default function FooterCopyright({copyright}: Props): ReactNode { - return ( -
- ); +export default function FooterCopyright({ copyright }: Props): ReactNode { + return ( +
+ ) } diff --git a/src/theme/Footer/Layout/index.tsx b/src/theme/Footer/Layout/index.tsx index f93829e..ca2b0f3 100644 --- a/src/theme/Footer/Layout/index.tsx +++ b/src/theme/Footer/Layout/index.tsx @@ -1,31 +1,26 @@ -import type { Props } from "@theme/Footer/Layout"; -import clsx from "clsx"; -import { type ReactNode } from "react"; +import type { Props } from "@theme/Footer/Layout" +import clsx from "clsx" +import { type ReactNode } from "react" -export default function FooterLayout({ - style, - links, - logo, - copyright, -}: Props): ReactNode { - return ( -
-
- {links} - {(logo || copyright) && ( -
- {logo &&
{logo}
} - {copyright} -
- )} -
-
- Copyright © 2025 The polyproto Contributors. -
-
- ); +export default function FooterLayout({ style, links, logo, copyright }: Props): ReactNode { + return ( +
+
+ {links} + {(logo || copyright) && ( +
+ {logo &&
{logo}
} + {copyright} +
+ )} +
+
+ Copyright © 2025 The polyproto Contributors. +
+
+ ) } diff --git a/src/theme/Footer/LinkItem/index.tsx b/src/theme/Footer/LinkItem/index.tsx index d2212e1..394bf4f 100644 --- a/src/theme/Footer/LinkItem/index.tsx +++ b/src/theme/Footer/LinkItem/index.tsx @@ -1,29 +1,33 @@ -import React, {type ReactNode} from 'react'; -import clsx from 'clsx'; -import Link from '@docusaurus/Link'; -import useBaseUrl from '@docusaurus/useBaseUrl'; -import isInternalUrl from '@docusaurus/isInternalUrl'; -import IconExternalLink from '@theme/Icon/ExternalLink'; -import type {Props} from '@theme/Footer/LinkItem'; +import React, { type ReactNode } from "react" +import clsx from "clsx" +import Link from "@docusaurus/Link" +import useBaseUrl from "@docusaurus/useBaseUrl" +import isInternalUrl from "@docusaurus/isInternalUrl" +import IconExternalLink from "@theme/Icon/ExternalLink" +import type { Props } from "@theme/Footer/LinkItem" -export default function FooterLinkItem({item}: Props): ReactNode { - const {to, href, label, prependBaseUrlToHref, className, ...props} = item; - const toUrl = useBaseUrl(to); - const normalizedHref = useBaseUrl(href, {forcePrependBaseUrl: true}); +export default function FooterLinkItem({ item }: Props): ReactNode { + const { to, href, label, prependBaseUrlToHref, className, ...props } = item + const toUrl = useBaseUrl(to) + const normalizedHref = useBaseUrl(href, { forcePrependBaseUrl: true }) - return ( - - {label} - {href && !isInternalUrl(href) && } - - ); + return ( + + {label} + {href && !isInternalUrl(href) && } + + ) } diff --git a/src/theme/Footer/Links/MultiColumn/index.tsx b/src/theme/Footer/Links/MultiColumn/index.tsx index dd9d43e..5dccd01 100644 --- a/src/theme/Footer/Links/MultiColumn/index.tsx +++ b/src/theme/Footer/Links/MultiColumn/index.tsx @@ -1,45 +1,45 @@ -import React, {type ReactNode} from 'react'; -import clsx from 'clsx'; -import LinkItem from '@theme/Footer/LinkItem'; -import type {Props} from '@theme/Footer/Links/MultiColumn'; +import React, { type ReactNode } from "react" +import clsx from "clsx" +import LinkItem from "@theme/Footer/LinkItem" +import type { Props } from "@theme/Footer/Links/MultiColumn" -type ColumnType = Props['columns'][number]; -type ColumnItemType = ColumnType['items'][number]; +type ColumnType = Props["columns"][number] +type ColumnItemType = ColumnType["items"][number] -function ColumnLinkItem({item}: {item: ColumnItemType}) { - return item.html ? ( -
  • - ) : ( -
  • - -
  • - ); +function ColumnLinkItem({ item }: { item: ColumnItemType }) { + return item.html ? ( +
  • + ) : ( +
  • + +
  • + ) } -function Column({column}: {column: ColumnType}) { - return ( -
    -
    {column.title}
    -
      - {column.items.map((item, i) => ( - - ))} -
    -
    - ); +function Column({ column }: { column: ColumnType }) { + return ( +
    +
    {column.title}
    +
      + {column.items.map((item, i) => ( + + ))} +
    +
    + ) } -export default function FooterLinksMultiColumn({columns}: Props): ReactNode { - return ( -
    - {columns.map((column, i) => ( - - ))} -
    - ); +export default function FooterLinksMultiColumn({ columns }: Props): ReactNode { + return ( +
    + {columns.map((column, i) => ( + + ))} +
    + ) } diff --git a/src/theme/Footer/Links/Simple/index.tsx b/src/theme/Footer/Links/Simple/index.tsx index dc6cf9d..8ea666e 100644 --- a/src/theme/Footer/Links/Simple/index.tsx +++ b/src/theme/Footer/Links/Simple/index.tsx @@ -1,36 +1,36 @@ -import React, {type ReactNode} from 'react'; -import clsx from 'clsx'; -import LinkItem from '@theme/Footer/LinkItem'; -import type {Props} from '@theme/Footer/Links/Simple'; +import React, { type ReactNode } from "react" +import clsx from "clsx" +import LinkItem from "@theme/Footer/LinkItem" +import type { Props } from "@theme/Footer/Links/Simple" function Separator() { - return ·; + return · } -function SimpleLinkItem({item}: {item: Props['links'][number]}) { - return item.html ? ( - - ) : ( - - ); +function SimpleLinkItem({ item }: { item: Props["links"][number] }) { + return item.html ? ( + + ) : ( + + ) } -export default function FooterLinksSimple({links}: Props): ReactNode { - return ( -
    -
    - {links.map((item, i) => ( - - - {links.length !== i + 1 && } - - ))} -
    -
    - ); +export default function FooterLinksSimple({ links }: Props): ReactNode { + return ( +
    +
    + {links.map((item, i) => ( + + + {links.length !== i + 1 && } + + ))} +
    +
    + ) } diff --git a/src/theme/Footer/Links/index.tsx b/src/theme/Footer/Links/index.tsx index 2dd3d84..a0ad8bf 100644 --- a/src/theme/Footer/Links/index.tsx +++ b/src/theme/Footer/Links/index.tsx @@ -1,14 +1,14 @@ -import React, {type ReactNode} from 'react'; +import React, { type ReactNode } from "react" -import {isMultiColumnFooterLinks} from '@docusaurus/theme-common'; -import FooterLinksMultiColumn from '@theme/Footer/Links/MultiColumn'; -import FooterLinksSimple from '@theme/Footer/Links/Simple'; -import type {Props} from '@theme/Footer/Links'; +import { isMultiColumnFooterLinks } from "@docusaurus/theme-common" +import FooterLinksMultiColumn from "@theme/Footer/Links/MultiColumn" +import FooterLinksSimple from "@theme/Footer/Links/Simple" +import type { Props } from "@theme/Footer/Links" -export default function FooterLinks({links}: Props): ReactNode { - return isMultiColumnFooterLinks(links) ? ( - - ) : ( - - ); +export default function FooterLinks({ links }: Props): ReactNode { + return isMultiColumnFooterLinks(links) ? ( + + ) : ( + + ) } diff --git a/src/theme/Footer/Logo/index.tsx b/src/theme/Footer/Logo/index.tsx index 8274216..78c7a5b 100644 --- a/src/theme/Footer/Logo/index.tsx +++ b/src/theme/Footer/Logo/index.tsx @@ -1,39 +1,36 @@ -import React, {type ReactNode} from 'react'; -import clsx from 'clsx'; -import Link from '@docusaurus/Link'; -import {useBaseUrlUtils} from '@docusaurus/useBaseUrl'; -import ThemedImage from '@theme/ThemedImage'; -import type {Props} from '@theme/Footer/Logo'; +import React, { type ReactNode } from "react" +import clsx from "clsx" +import Link from "@docusaurus/Link" +import { useBaseUrlUtils } from "@docusaurus/useBaseUrl" +import ThemedImage from "@theme/ThemedImage" +import type { Props } from "@theme/Footer/Logo" -import styles from './styles.module.css'; +import styles from "./styles.module.css" -function LogoImage({logo}: Props) { - const {withBaseUrl} = useBaseUrlUtils(); - const sources = { - light: withBaseUrl(logo.src), - dark: withBaseUrl(logo.srcDark ?? logo.src), - }; - return ( - - ); +function LogoImage({ logo }: Props) { + const { withBaseUrl } = useBaseUrlUtils() + const sources = { + light: withBaseUrl(logo.src), + dark: withBaseUrl(logo.srcDark ?? logo.src), + } + return ( + + ) } -export default function FooterLogo({logo}: Props): ReactNode { - return logo.href ? ( - - - - ) : ( - - ); +export default function FooterLogo({ logo }: Props): ReactNode { + return logo.href ? 
( + + + + ) : ( + + ) } diff --git a/src/theme/Footer/Logo/styles.module.css b/src/theme/Footer/Logo/styles.module.css index faf0e60..c816163 100644 --- a/src/theme/Footer/Logo/styles.module.css +++ b/src/theme/Footer/Logo/styles.module.css @@ -1,9 +1,8 @@ .footerLogoLink { - opacity: 0.5; - transition: opacity var(--ifm-transition-fast) - var(--ifm-transition-timing-default); + opacity: 0.5; + transition: opacity var(--ifm-transition-fast) var(--ifm-transition-timing-default); } .footerLogoLink:hover { - opacity: 1; + opacity: 1; } diff --git a/src/theme/Footer/index.tsx b/src/theme/Footer/index.tsx index 595bc51..7df1468 100644 --- a/src/theme/Footer/index.tsx +++ b/src/theme/Footer/index.tsx @@ -1,26 +1,26 @@ -import React, {type ReactNode} from 'react'; +import React, { type ReactNode } from "react" -import {useThemeConfig} from '@docusaurus/theme-common'; -import FooterLinks from '@theme/Footer/Links'; -import FooterLogo from '@theme/Footer/Logo'; -import FooterCopyright from '@theme/Footer/Copyright'; -import FooterLayout from '@theme/Footer/Layout'; +import { useThemeConfig } from "@docusaurus/theme-common" +import FooterLinks from "@theme/Footer/Links" +import FooterLogo from "@theme/Footer/Logo" +import FooterCopyright from "@theme/Footer/Copyright" +import FooterLayout from "@theme/Footer/Layout" function Footer(): ReactNode { - const {footer} = useThemeConfig(); - if (!footer) { - return null; - } - const {copyright, links, logo, style} = footer; + const { footer } = useThemeConfig() + if (!footer) { + return null + } + const { copyright, links, logo, style } = footer - return ( - 0 && } - logo={logo && } - copyright={copyright && } - /> - ); + return ( + 0 && } + logo={logo && } + copyright={copyright && } + /> + ) } -export default React.memo(Footer); +export default React.memo(Footer) diff --git a/src/theme/Navbar/ColorModeToggle/index.tsx b/src/theme/Navbar/ColorModeToggle/index.tsx index 81b9647..a950808 100644 --- a/src/theme/Navbar/ColorModeToggle/index.tsx +++ b/src/theme/Navbar/ColorModeToggle/index.tsx @@ -1,26 +1,24 @@ -import React, {type ReactNode} from 'react'; -import {useColorMode, useThemeConfig} from '@docusaurus/theme-common'; -import ColorModeToggle from '@theme/ColorModeToggle'; -import type {Props} from '@theme/Navbar/ColorModeToggle'; -import styles from './styles.module.css'; +import React, { type ReactNode } from "react" +import { useColorMode, useThemeConfig } from "@docusaurus/theme-common" +import ColorModeToggle from "@theme/ColorModeToggle" +import type { Props } from "@theme/Navbar/ColorModeToggle" +import styles from "./styles.module.css" -export default function NavbarColorModeToggle({className}: Props): ReactNode { - const navbarStyle = useThemeConfig().navbar.style; - const disabled = useThemeConfig().colorMode.disableSwitch; - const {colorMode, setColorMode} = useColorMode(); +export default function NavbarColorModeToggle({ className }: Props): ReactNode { + const navbarStyle = useThemeConfig().navbar.style + const disabled = useThemeConfig().colorMode.disableSwitch + const { colorMode, setColorMode } = useColorMode() - if (disabled) { - return null; - } + if (disabled) { + return null + } - return ( - - ); + return ( + + ) } diff --git a/src/theme/Navbar/ColorModeToggle/styles.module.css b/src/theme/Navbar/ColorModeToggle/styles.module.css index 7bd077a..b435ad1 100644 --- a/src/theme/Navbar/ColorModeToggle/styles.module.css +++ b/src/theme/Navbar/ColorModeToggle/styles.module.css @@ -1,3 +1,3 @@ .darkNavbarColorModeToggle:hover { - 
background: var(--ifm-color-gray-800); + background: var(--ifm-color-gray-800); } diff --git a/src/theme/Navbar/Content/index.tsx b/src/theme/Navbar/Content/index.tsx index 2ec40d4..6aa7f71 100644 --- a/src/theme/Navbar/Content/index.tsx +++ b/src/theme/Navbar/Content/index.tsx @@ -1,8 +1,5 @@ import { ErrorCauseBoundary, useThemeConfig } from "@docusaurus/theme-common" -import { - splitNavbarItems, - useNavbarMobileSidebar, -} from "@docusaurus/theme-common/internal" +import { splitNavbarItems, useNavbarMobileSidebar } from "@docusaurus/theme-common/internal" import NavbarLogo from "@theme/Navbar/Logo" import NavbarMobileSidebarToggle from "@theme/Navbar/MobileSidebar/Toggle" import NavbarSearch from "@theme/Navbar/Search" @@ -39,13 +36,7 @@ ${JSON.stringify(item, null, 2)}`, ) } -function NavbarContentLayout({ - left, - right, -}: { - left: ReactNode - right: ReactNode -}) { +function NavbarContentLayout({ left, right }: { left: ReactNode; right: ReactNode }) { return (
    {left}
    diff --git a/src/theme/Navbar/Content/styles.module.css b/src/theme/Navbar/Content/styles.module.css index 4c9471e..e40f310 100644 --- a/src/theme/Navbar/Content/styles.module.css +++ b/src/theme/Navbar/Content/styles.module.css @@ -2,7 +2,7 @@ Hide color mode toggle in small viewports */ @media (max-width: 996px) { - .colorModeToggle { - display: none; - } + .colorModeToggle { + display: none; + } } diff --git a/src/theme/Navbar/Layout/index.tsx b/src/theme/Navbar/Layout/index.tsx index d629301..67a2046 100644 --- a/src/theme/Navbar/Layout/index.tsx +++ b/src/theme/Navbar/Layout/index.tsx @@ -1,56 +1,51 @@ -import React, {type ComponentProps, type ReactNode} from 'react'; -import clsx from 'clsx'; -import {useThemeConfig} from '@docusaurus/theme-common'; -import { - useHideableNavbar, - useNavbarMobileSidebar, -} from '@docusaurus/theme-common/internal'; -import {translate} from '@docusaurus/Translate'; -import NavbarMobileSidebar from '@theme/Navbar/MobileSidebar'; -import type {Props} from '@theme/Navbar/Layout'; +import React, { type ComponentProps, type ReactNode } from "react" +import clsx from "clsx" +import { useThemeConfig } from "@docusaurus/theme-common" +import { useHideableNavbar, useNavbarMobileSidebar } from "@docusaurus/theme-common/internal" +import { translate } from "@docusaurus/Translate" +import NavbarMobileSidebar from "@theme/Navbar/MobileSidebar" +import type { Props } from "@theme/Navbar/Layout" -import styles from './styles.module.css'; +import styles from "./styles.module.css" -function NavbarBackdrop(props: ComponentProps<'div'>) { - return ( -
    - ); +function NavbarBackdrop(props: ComponentProps<"div">) { + return ( +
    + ) } -export default function NavbarLayout({children}: Props): ReactNode { - const { - navbar: {hideOnScroll, style}, - } = useThemeConfig(); - const mobileSidebar = useNavbarMobileSidebar(); - const {navbarRef, isNavbarVisible} = useHideableNavbar(hideOnScroll); - return ( - - ); +export default function NavbarLayout({ children }: Props): ReactNode { + const { + navbar: { hideOnScroll, style }, + } = useThemeConfig() + const mobileSidebar = useNavbarMobileSidebar() + const { navbarRef, isNavbarVisible } = useHideableNavbar(hideOnScroll) + return ( + + ) } diff --git a/src/theme/Navbar/Layout/styles.module.css b/src/theme/Navbar/Layout/styles.module.css index e72891a..5681a23 100644 --- a/src/theme/Navbar/Layout/styles.module.css +++ b/src/theme/Navbar/Layout/styles.module.css @@ -1,7 +1,7 @@ .navbarHideable { - transition: transform var(--ifm-transition-fast) ease; + transition: transform var(--ifm-transition-fast) ease; } .navbarHidden { - transform: translate3d(0, calc(-100% - 2px), 0); + transform: translate3d(0, calc(-100% - 2px), 0); } diff --git a/src/theme/Navbar/MobileSidebar/Header/index.tsx b/src/theme/Navbar/MobileSidebar/Header/index.tsx index 46a71e9..89edb63 100644 --- a/src/theme/Navbar/MobileSidebar/Header/index.tsx +++ b/src/theme/Navbar/MobileSidebar/Header/index.tsx @@ -1,33 +1,34 @@ -import React, {type ReactNode} from 'react'; -import {useNavbarMobileSidebar} from '@docusaurus/theme-common/internal'; -import {translate} from '@docusaurus/Translate'; -import NavbarColorModeToggle from '@theme/Navbar/ColorModeToggle'; -import IconClose from '@theme/Icon/Close'; -import NavbarLogo from '@theme/Navbar/Logo'; +import React, { type ReactNode } from "react" +import { useNavbarMobileSidebar } from "@docusaurus/theme-common/internal" +import { translate } from "@docusaurus/Translate" +import NavbarColorModeToggle from "@theme/Navbar/ColorModeToggle" +import IconClose from "@theme/Icon/Close" +import NavbarLogo from "@theme/Navbar/Logo" function CloseButton() { - const mobileSidebar = useNavbarMobileSidebar(); - return ( - - ); + const mobileSidebar = useNavbarMobileSidebar() + return ( + + ) } export default function NavbarMobileSidebarHeader(): ReactNode { - return ( -
    - - {/* */} - -
    - ); + return ( +
    + + {/* */} + +
    + ) } diff --git a/src/theme/Navbar/MobileSidebar/Layout/index.tsx b/src/theme/Navbar/MobileSidebar/Layout/index.tsx index 268eef4..bc6b935 100644 --- a/src/theme/Navbar/MobileSidebar/Layout/index.tsx +++ b/src/theme/Navbar/MobileSidebar/Layout/index.tsx @@ -1,24 +1,25 @@ -import React, {type ReactNode} from 'react'; -import clsx from 'clsx'; -import {useNavbarSecondaryMenu} from '@docusaurus/theme-common/internal'; -import type {Props} from '@theme/Navbar/MobileSidebar/Layout'; +import React, { type ReactNode } from "react" +import clsx from "clsx" +import { useNavbarSecondaryMenu } from "@docusaurus/theme-common/internal" +import type { Props } from "@theme/Navbar/MobileSidebar/Layout" export default function NavbarMobileSidebarLayout({ - header, - primaryMenu, - secondaryMenu, + header, + primaryMenu, + secondaryMenu, }: Props): ReactNode { - const {shown: secondaryMenuShown} = useNavbarSecondaryMenu(); - return ( -
    - {header} -
    -
    {primaryMenu}
    -
    {secondaryMenu}
    -
    -
    - ); + const { shown: secondaryMenuShown } = useNavbarSecondaryMenu() + return ( +
    + {header} +
    +
    {primaryMenu}
    +
    {secondaryMenu}
    +
    +
    + ) } diff --git a/src/theme/Navbar/MobileSidebar/PrimaryMenu/index.tsx b/src/theme/Navbar/MobileSidebar/PrimaryMenu/index.tsx index d5d0913..dbc5dfe 100644 --- a/src/theme/Navbar/MobileSidebar/PrimaryMenu/index.tsx +++ b/src/theme/Navbar/MobileSidebar/PrimaryMenu/index.tsx @@ -1,31 +1,26 @@ -import React, {type ReactNode} from 'react'; -import {useThemeConfig} from '@docusaurus/theme-common'; -import {useNavbarMobileSidebar} from '@docusaurus/theme-common/internal'; -import NavbarItem, {type Props as NavbarItemConfig} from '@theme/NavbarItem'; +import React, { type ReactNode } from "react" +import { useThemeConfig } from "@docusaurus/theme-common" +import { useNavbarMobileSidebar } from "@docusaurus/theme-common/internal" +import NavbarItem, { type Props as NavbarItemConfig } from "@theme/NavbarItem" function useNavbarItems() { - // TODO temporary casting until ThemeConfig type is improved - return useThemeConfig().navbar.items as NavbarItemConfig[]; + // TODO temporary casting until ThemeConfig type is improved + return useThemeConfig().navbar.items as NavbarItemConfig[] } // The primary menu displays the navbar items export default function NavbarMobilePrimaryMenu(): ReactNode { - const mobileSidebar = useNavbarMobileSidebar(); + const mobileSidebar = useNavbarMobileSidebar() - // TODO how can the order be defined for mobile? - // Should we allow providing a different list of items? - const items = useNavbarItems(); + // TODO how can the order be defined for mobile? + // Should we allow providing a different list of items? + const items = useNavbarItems() - return ( -
      - {items.map((item, i) => ( - mobileSidebar.toggle()} - key={i} - /> - ))} -
    - ); + return ( +
      + {items.map((item, i) => ( + mobileSidebar.toggle()} key={i} /> + ))} +
    + ) } diff --git a/src/theme/Navbar/MobileSidebar/SecondaryMenu/index.tsx b/src/theme/Navbar/MobileSidebar/SecondaryMenu/index.tsx index eb4f1e9..4a5caf9 100644 --- a/src/theme/Navbar/MobileSidebar/SecondaryMenu/index.tsx +++ b/src/theme/Navbar/MobileSidebar/SecondaryMenu/index.tsx @@ -1,32 +1,33 @@ -import React, {type ComponentProps, type ReactNode} from 'react'; -import {useThemeConfig} from '@docusaurus/theme-common'; -import {useNavbarSecondaryMenu} from '@docusaurus/theme-common/internal'; -import Translate from '@docusaurus/Translate'; +import React, { type ComponentProps, type ReactNode } from "react" +import { useThemeConfig } from "@docusaurus/theme-common" +import { useNavbarSecondaryMenu } from "@docusaurus/theme-common/internal" +import Translate from "@docusaurus/Translate" -function SecondaryMenuBackButton(props: ComponentProps<'button'>) { - return ( - - ); +function SecondaryMenuBackButton(props: ComponentProps<"button">) { + return ( + + ) } // The secondary menu slides from the right and shows contextual information // such as the docs sidebar export default function NavbarMobileSidebarSecondaryMenu(): ReactNode { - const isPrimaryMenuEmpty = useThemeConfig().navbar.items.length === 0; - const secondaryMenu = useNavbarSecondaryMenu(); - return ( - <> - {/* edge-case: prevent returning to the primaryMenu when it's empty */} - {!isPrimaryMenuEmpty && ( - secondaryMenu.hide()} /> - )} - {secondaryMenu.content} - - ); + const isPrimaryMenuEmpty = useThemeConfig().navbar.items.length === 0 + const secondaryMenu = useNavbarSecondaryMenu() + return ( + <> + {/* edge-case: prevent returning to the primaryMenu when it's empty */} + {!isPrimaryMenuEmpty && ( + secondaryMenu.hide()} /> + )} + {secondaryMenu.content} + + ) } diff --git a/src/theme/Navbar/MobileSidebar/Toggle/index.tsx b/src/theme/Navbar/MobileSidebar/Toggle/index.tsx index d8f9a53..a7778e6 100644 --- a/src/theme/Navbar/MobileSidebar/Toggle/index.tsx +++ b/src/theme/Navbar/MobileSidebar/Toggle/index.tsx @@ -1,23 +1,23 @@ -import React, {type ReactNode} from 'react'; -import {useNavbarMobileSidebar} from '@docusaurus/theme-common/internal'; -import {translate} from '@docusaurus/Translate'; -import IconMenu from '@theme/Icon/Menu'; +import React, { type ReactNode } from "react" +import { useNavbarMobileSidebar } from "@docusaurus/theme-common/internal" +import { translate } from "@docusaurus/Translate" +import IconMenu from "@theme/Icon/Menu" export default function MobileSidebarToggle(): ReactNode { - const {toggle, shown} = useNavbarMobileSidebar(); - return ( - - ); + const { toggle, shown } = useNavbarMobileSidebar() + return ( + + ) } diff --git a/src/theme/Navbar/MobileSidebar/index.tsx b/src/theme/Navbar/MobileSidebar/index.tsx index 26c9f1c..b2ff4a9 100644 --- a/src/theme/Navbar/MobileSidebar/index.tsx +++ b/src/theme/Navbar/MobileSidebar/index.tsx @@ -1,26 +1,23 @@ -import React, {type ReactNode} from 'react'; -import { - useLockBodyScroll, - useNavbarMobileSidebar, -} from '@docusaurus/theme-common/internal'; -import NavbarMobileSidebarLayout from '@theme/Navbar/MobileSidebar/Layout'; -import NavbarMobileSidebarHeader from '@theme/Navbar/MobileSidebar/Header'; -import NavbarMobileSidebarPrimaryMenu from '@theme/Navbar/MobileSidebar/PrimaryMenu'; -import NavbarMobileSidebarSecondaryMenu from '@theme/Navbar/MobileSidebar/SecondaryMenu'; +import React, { type ReactNode } from "react" +import { useLockBodyScroll, useNavbarMobileSidebar } from "@docusaurus/theme-common/internal" +import 
NavbarMobileSidebarLayout from "@theme/Navbar/MobileSidebar/Layout" +import NavbarMobileSidebarHeader from "@theme/Navbar/MobileSidebar/Header" +import NavbarMobileSidebarPrimaryMenu from "@theme/Navbar/MobileSidebar/PrimaryMenu" +import NavbarMobileSidebarSecondaryMenu from "@theme/Navbar/MobileSidebar/SecondaryMenu" export default function NavbarMobileSidebar(): ReactNode { - const mobileSidebar = useNavbarMobileSidebar(); - useLockBodyScroll(mobileSidebar.shown); + const mobileSidebar = useNavbarMobileSidebar() + useLockBodyScroll(mobileSidebar.shown) - if (!mobileSidebar.shouldRender) { - return null; - } + if (!mobileSidebar.shouldRender) { + return null + } - return ( - } - primaryMenu={} - secondaryMenu={} - /> - ); + return ( + } + primaryMenu={} + secondaryMenu={} + /> + ) } diff --git a/src/theme/Navbar/Search/index.tsx b/src/theme/Navbar/Search/index.tsx index 4da080c..e29d4a2 100644 --- a/src/theme/Navbar/Search/index.tsx +++ b/src/theme/Navbar/Search/index.tsx @@ -1,13 +1,9 @@ -import React, {type ReactNode} from 'react'; -import clsx from 'clsx'; -import type {Props} from '@theme/Navbar/Search'; +import React, { type ReactNode } from "react" +import clsx from "clsx" +import type { Props } from "@theme/Navbar/Search" -import styles from './styles.module.css'; +import styles from "./styles.module.css" -export default function NavbarSearch({children, className}: Props): ReactNode { - return ( -
    - {children} -
    - ); +export default function NavbarSearch({ children, className }: Props): ReactNode { + return
    {children}
    } diff --git a/src/theme/Navbar/Search/styles.module.css b/src/theme/Navbar/Search/styles.module.css index 9eeb293..8054771 100644 --- a/src/theme/Navbar/Search/styles.module.css +++ b/src/theme/Navbar/Search/styles.module.css @@ -3,19 +3,18 @@ Workaround to avoid rendering empty search container See https://github.com/facebook/docusaurus/pull/9385 */ .navbarSearchContainer:empty { - display: none; + display: none; } @media (max-width: 996px) { - .navbarSearchContainer { - position: absolute; - right: var(--ifm-navbar-padding-horizontal); - } + .navbarSearchContainer { + position: absolute; + right: var(--ifm-navbar-padding-horizontal); + } } @media (min-width: 997px) { - .navbarSearchContainer { - padding: var(--ifm-navbar-item-padding-vertical) - var(--ifm-navbar-item-padding-horizontal); - } + .navbarSearchContainer { + padding: var(--ifm-navbar-item-padding-vertical) var(--ifm-navbar-item-padding-horizontal); + } } diff --git a/src/theme/Navbar/index.tsx b/src/theme/Navbar/index.tsx index 48ce38a..58bdee7 100644 --- a/src/theme/Navbar/index.tsx +++ b/src/theme/Navbar/index.tsx @@ -1,11 +1,11 @@ -import React, {type ReactNode} from 'react'; -import NavbarLayout from '@theme/Navbar/Layout'; -import NavbarContent from '@theme/Navbar/Content'; +import React, { type ReactNode } from "react" +import NavbarLayout from "@theme/Navbar/Layout" +import NavbarContent from "@theme/Navbar/Content" export default function Navbar(): ReactNode { - return ( - - - - ); + return ( + + + + ) } diff --git a/tailwind.config.js b/tailwind.config.js index 681abd9..052b2bf 100644 --- a/tailwind.config.js +++ b/tailwind.config.js @@ -1,21 +1,21 @@ /** @type {import('tailwindcss').Config} */ module.exports = { - content: ["./src/**/*.{jsx,tsx,html}"], - theme: { - extend: { - colors: { - "poly-black": "#000000", - "poly-white": "#f5f5f5", - "poly-green-1": "#9fa9ed", - "poly-green-2": "#3577ff", - "poly-indigo-1": "#8571FF", - "poly-indigo-2": "#b3a8ff" - }, - fontFamily: { - body: ['Satoshi', 'sans'], - mono: ['Spline Sans Mono', 'monospace'] - } + content: ["./src/**/*.{jsx,tsx,html}"], + theme: { + extend: { + colors: { + "poly-black": "#000000", + "poly-white": "#f5f5f5", + "poly-green-1": "#9fa9ed", + "poly-green-2": "#3577ff", + "poly-indigo-1": "#8571FF", + "poly-indigo-2": "#b3a8ff", + }, + fontFamily: { + body: ["Satoshi", "sans"], + mono: ["Spline Sans Mono", "monospace"], + }, + }, }, - }, - plugins: [], + plugins: [], } diff --git a/tsconfig.json b/tsconfig.json index 920d7a6..f79b805 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,8 +1,8 @@ { - // This file is not used in compilation. It is here just for a nice editor experience. - "extends": "@docusaurus/tsconfig", - "compilerOptions": { - "baseUrl": "." - }, - "exclude": [".docusaurus", "build"] + // This file is not used in compilation. It is here just for a nice editor experience. + "extends": "@docusaurus/tsconfig", + "compilerOptions": { + "baseUrl": "." + }, + "exclude": [".docusaurus", "build"] }