diff --git a/.editorconfig b/.editorconfig index 17a83ee5..ccf62fcc 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,15 +8,32 @@ insert_final_newline = true trim_trailing_whitespace = true indent_style = space indent_size = 2 -max_line_length = 80 [*.{d,h,hpp,c,cpp,cxx,cs,hs,java,kt,py,rs,sol}] indent_size = 4 -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +[{Makefile,go.mod,go.sum,*.go,.gitmodules,dub.selections.json}] indent_style = tab indent_size = 4 [{CMakeLists.txt,*.cmake}] indent_size = 2 indent_style = space + +[*.md] +indent_style = space +# Prettier handles formatting for markdown files +# and `indent_size` is not enforcable because ordered and unordered lists use +# different indentation: +indent_size = unset + +[**/Cargo.lock] +indent_style = space +indent_size = unset + +[*.wit] +indent_style = space +indent_size = 2 + +[*.rs] +indent_size = unset diff --git a/.envrc b/.envrc index 44469957..4dfc2219 100644 --- a/.envrc +++ b/.envrc @@ -1,9 +1,9 @@ # shellcheck shell=bash -if ! has nix_direnv_version || ! nix_direnv_version 2.4.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.4.0/direnvrc" "sha256-XQzUAvL6pysIJnRJyR7uVpmUSZfc7LSgWQwq/4mBr1U=" +if ! has nix_direnv_version || ! nix_direnv_version 3.0.6; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.6/direnvrc" "sha256-RYcUJaRMf8oF5LznDrlCXbkOQrywm0HDv1VjYGaJGdM=" fi dotenv_if_exists use flake -nix_direnv_watch_file "$(find ./shells -name "*.nix" -printf '"%p" ')" +watch_file "$(find ./shells -name "*.nix" -printf '"%p" ')" diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0844ba29..195e5e9c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,17 +1,16 @@ version: 2 updates: - -- package-ecosystem: github-actions - directory: "/" - schedule: - interval: daily - time: '00:00' - timezone: UTC - commit-message: - prefix: "chore" - include: "scope" - groups: - gh-actions: - patterns: - # Bundle all GH Actions updates in a single PR - - "*" + - package-ecosystem: github-actions + directory: '/' + schedule: + interval: daily + time: '00:00' + timezone: UTC + commit-message: + prefix: 'chore' + include: 'scope' + groups: + gh-actions: + patterns: + # Bundle all GH Actions updates in a single PR + - '*' diff --git a/.github/install-nix/action.yml b/.github/install-nix/action.yml index 2a16e23d..5237143b 100644 --- a/.github/install-nix/action.yml +++ b/.github/install-nix/action.yml @@ -22,7 +22,7 @@ inputs: required: false runs: - using: "composite" + using: 'composite' steps: - name: Install Nix uses: cachix/install-nix-action@v27 diff --git a/.github/print-matrix/action.yml b/.github/print-matrix/action.yml index 90327e68..8fb486e9 100644 --- a/.github/print-matrix/action.yml +++ b/.github/print-matrix/action.yml @@ -38,7 +38,7 @@ outputs: value: ${{ steps.print-matrix.outputs.comment }} runs: - using: "composite" + using: 'composite' steps: - name: Install Nix uses: metacraft-labs/nixos-modules/.github/install-nix@main diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 57c6336c..88c8ab7c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,11 +11,34 @@ on: pull_request: branches: - main - push: - branches: - - main + +concurrency: + group: ${{ github.workflow }}-${{ github.repository }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true jobs: + lint: + uses: ./.github/workflows/reusable-lint.yml + secrets: inherit + with: + runner: '["self-hosted", 
"Linux", "x86-64-v2"]' + + test-mcl: + runs-on: self-hosted + steps: + - name: Install Nix + uses: metacraft-labs/nixos-modules/.github/install-nix@main + with: + cachix-cache: ${{ vars.CACHIX_CACHE }} + cachix-auth-token: ${{ secrets.CACHIX_AUTH_TOKEN }} + trusted-public-keys: ${{ vars.TRUSTED_PUBLIC_KEYS }} + substituters: ${{ vars.SUBSTITUTERS }} + + - uses: actions/checkout@v4 + + - name: Build and test the `mcl` command + run: nix develop -c sh -c "dub test --root packages/mcl -- -e 'fetchJson|(coda\.)|nix.run|nix.build'" + ci: uses: ./.github/workflows/reusable-flake-checks-ci-matrix.yml secrets: inherit diff --git a/.github/workflows/mcl.yml b/.github/workflows/mcl.yml deleted file mode 100644 index 160478b0..00000000 --- a/.github/workflows/mcl.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: 'MCL' - -on: - # Allow this workflow to be triggered manually: - workflow_dispatch: - - # Allow this workflow to be triggered in merge - merge_group: - - push: - branches: - - main - pull_request: - branches: - - main - -concurrency: - group: ${{ github.workflow }}-${{ github.repository }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - test-mcl: - runs-on: self-hosted - steps: - - name: Install Nix - uses: metacraft-labs/nixos-modules/.github/install-nix@main - with: - cachix-cache: ${{ vars.CACHIX_CACHE }} - cachix-auth-token: ${{ secrets.CACHIX_AUTH_TOKEN }} - trusted-public-keys: ${{ vars.TRUSTED_PUBLIC_KEYS }} - substituters: ${{ vars.SUBSTITUTERS }} - - - uses: actions/checkout@v4 - - - name: Build and test the `mcl` command - run: nix develop -c sh -c "dub test --root packages/mcl -- -e 'fetchJson|(coda\.)|nix.run|nix.build'" diff --git a/.github/workflows/reusable-lint.yml b/.github/workflows/reusable-lint.yml new file mode 100644 index 00000000..fb14c2eb --- /dev/null +++ b/.github/workflows/reusable-lint.yml @@ -0,0 +1,37 @@ +name: 'Lint' + +on: + # Allow this workflow to be reused by other workflows: + workflow_call: + inputs: + runner: + description: 'JSON-encoded list of runner labels' + default: '["self-hosted"]' + required: false + type: string + + secrets: + NIX_GITHUB_TOKEN: + description: GitHub token to add as access-token in nix.conf + required: false + CACHIX_AUTH_TOKEN: + description: 'Cachix auth token' + required: true + +jobs: + lint: + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + + - name: Install Nix + uses: metacraft-labs/nixos-modules/.github/install-nix@main + with: + nix-github-token: ${{ secrets.NIX_GITHUB_TOKEN }} + cachix-auth-token: ${{ secrets.CACHIX_AUTH_TOKEN }} + cachix-cache: ${{ vars.CACHIX_CACHE }} + trusted-public-keys: ${{ vars.TRUSTED_PUBLIC_KEYS }} + substituters: ${{ vars.SUBSTITUTERS }} + + - name: Check formatting + run: nix develop --accept-flake-config .#pre-commit -c pre-commit run --all --show-diff-on-failure --color=always diff --git a/.github/workflows/reusable-update-flake-lock.yml b/.github/workflows/reusable-update-flake-lock.yml index 96c54899..42d1a4cf 100644 --- a/.github/workflows/reusable-update-flake-lock.yml +++ b/.github/workflows/reusable-update-flake-lock.yml @@ -1,4 +1,4 @@ -name: "Update Nix Flake lockfile" +name: 'Update Nix Flake lockfile' on: # Allow this workflow to be reused by other workflows: diff --git a/.github/workflows/reusable-update-flake-packages.yml b/.github/workflows/reusable-update-flake-packages.yml index 1ab90fd5..997db874 100644 --- a/.github/workflows/reusable-update-flake-packages.yml +++ b/.github/workflows/reusable-update-flake-packages.yml @@ 
-1,4 +1,4 @@ -name: "Update Flake Packages ❄️" +name: 'Update Flake Packages ❄️' on: # Allow this workflow to be reused by other workflows: @@ -29,7 +29,7 @@ on: # Run everyday at 00:00: schedule: - - cron: "0 0 * * *" # https://crontab.guru/#0_0_*_*_* + - cron: '0 0 * * *' # https://crontab.guru/#0_0_*_*_* jobs: updateFlakePackages: @@ -57,4 +57,4 @@ jobs: uses: metacraft-labs/nix-update-action@main with: token: ${{ steps.generate-token.outputs.token }} - blacklist: "ci-matrix,folder-size-metrics,mcl,grafana-agent,validator-ejector" + blacklist: 'ci-matrix,folder-size-metrics,mcl,grafana-agent,validator-ejector' diff --git a/.gitignore b/.gitignore index 6e10d488..038b447a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,6 @@ matrix-*.json shardMatrix.json .vscode + +# Pre Commit +.pre-commit-config.yaml diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 00000000..60150f76 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,8 @@ +.direnv +node_modules +.yarn +.pnp.cjs +.pnp.loader.mjs +*.json +flake.lock +*.mdx diff --git a/.prettierrc.json b/.prettierrc.json new file mode 100644 index 00000000..48494c76 --- /dev/null +++ b/.prettierrc.json @@ -0,0 +1,8 @@ +{ + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "semi": true, + "arrowParens": "avoid", + "trailingComma": "all" +} diff --git a/README.md b/README.md index bdaeb694..73a69eec 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,7 @@ Evaluates each package, and compares it to it's last cached version, creating a This command is not meant to be run manually, but rather to be ran by the CI. ENV Variables: + - IS_INITIAL: `true` or `false` - CACHIX_CACHE: Which cachix cache to search - CACHIX_AUTH_TOKEN: The auth token for the cache @@ -36,6 +37,7 @@ Usage: Use `mcl ci` instead Evaluates each package, and compares it to it's last cached version, creating a table listing which packages are cached, which aren't, and which failed. ENV Variables: + - IS_INITIAL: `true` or `false` - CACHIX_CACHE: Which cachix cache to search - CACHIX_AUTH_TOKEN: The auth token for the cache @@ -53,6 +55,7 @@ Usage: `mcl deploy_spec` ### get_fstab ENV Variables: + - IS_INITIAL: `true` or `false` - CACHIX_CACHE: Which cachix cache to search - CACHIX_AUTH_TOKEN: The auth token for the cache @@ -74,9 +77,11 @@ Usage: `mcl host_info` Create a starting nix configuration for target machine. ENV Variables: + - SSH_PATH: SSH path of target machine The remaining ENV variables are optional, and if missing will be prompted at runtime. + - CREATE_USER: bool - USER_NAME: string - MACHINE_NAME: string @@ -93,6 +98,7 @@ Usage: `mcl machine_create` Splits the list of packages under `checks` into n number of shards. Requires manual configuration using modules/shard-split. See this repo and `nix-blockchain-development` ENV Variables: + - [Optional] GITHUB_OUTPUT: If set, exports results to GITHUB_INPUT env variable Usage: `mcl shard_matrix` diff --git a/checks/default.nix b/checks/default.nix new file mode 100644 index 00000000..0379200d --- /dev/null +++ b/checks/default.nix @@ -0,0 +1,7 @@ +{ ... }: +{ + imports = [ + ./packages-ci-matrix.nix + ./pre-commit.nix + ]; +} diff --git a/checks/packages-ci-matrix.nix b/checks/packages-ci-matrix.nix new file mode 100644 index 00000000..4a8c92cf --- /dev/null +++ b/checks/packages-ci-matrix.nix @@ -0,0 +1,57 @@ +{ + lib, + inputs, + ... +}: +{ + perSystem = + { + inputs', + self', + pkgs, + ... 
+ }: + let + inherit (lib) optionalAttrs; + inherit (pkgs) system; + inherit (pkgs.hostPlatform) isLinux; + in + rec { + checks = + self'.packages + // { + inherit (self'.legacyPackages) rustToolchain; + inherit (self'.legacyPackages.inputs.dlang-nix) dub; + inherit (self'.legacyPackages.inputs.nixpkgs) + cachix + nix + nix-eval-jobs + nix-fast-build + ; + inherit (self'.legacyPackages.inputs.ethereum-nix) foundry; + } + // optionalAttrs (system == "x86_64-linux" || system == "aarch64-darwin") { + inherit (self'.legacyPackages.inputs.ethereum-nix) geth; + } + // optionalAttrs isLinux { + inherit (inputs'.validator-ejector.packages) validator-ejector; + } + // optionalAttrs (system == "x86_64-linux") { + inherit (pkgs) terraform; + inherit (self'.legacyPackages.inputs.terranix) terranix; + inherit (self'.legacyPackages.inputs.dlang-nix) + dcd + dscanner + serve-d + dmd + ldc + ; + inherit (self'.legacyPackages.inputs.ethereum-nix) + mev-boost + nethermind + web3signer + nimbus-eth2 + ; + }; + }; +} diff --git a/checks/pre-commit.nix b/checks/pre-commit.nix new file mode 100644 index 00000000..120cb2bf --- /dev/null +++ b/checks/pre-commit.nix @@ -0,0 +1,79 @@ +{ inputs, ... }: +{ + flake.flakeModules.git-hooks = + { ... }: + { + imports = [ + # Import git-hooks flake-parts module + # docs: https://flake.parts/options/git-hooks-nix + inputs.git-hooks-nix.flakeModule + ]; + + config = { + perSystem = + { config, pkgs, ... }: + { + devShells.pre-commit = + let + inherit (config.pre-commit.settings) enabledPackages package rawConfig; + configFile = + pkgs.runCommand "pre-commit-config.json" + { + buildInputs = [ + pkgs.jq + package + ]; + passAsFile = [ "rawJSON" ]; + rawJSON = builtins.toJSON rawConfig; + } + '' + { + echo '# DO NOT MODIFY'; + echo '# This file was generated by git-hooks.nix'; + jq . <"$rawJSONPath" + } >$out + ''; + in + pkgs.mkShell { + packages = enabledPackages ++ [ package ]; + shellHook = '' + ln -fs ${configFile} .pre-commit-config.yaml + echo "Running Pre-commit checks" + echo "=========================" + ''; + }; + + # impl: https://github.com/cachix/git-hooks.nix/blob/master/flake-module.nix + pre-commit = { + # Disable `checks` flake output + check.enable = false; + + # Enable commonly used formatters + settings.hooks = { + # Basic whitespace formatting + end-of-file-fixer.enable = true; + editorconfig-checker.enable = true; + + # *.nix formatting + nixfmt-rfc-style.enable = true; + + # *.rs formatting + rustfmt.enable = true; + + # *.{js,jsx,ts,tsx,css,html,md,json} formatting + prettier = { + enable = true; + args = [ + "--check" + "--list-different=false" + "--log-level=warn" + "--ignore-unknown" + "--write" + ]; + }; + }; + }; + }; + }; + }; +} diff --git a/flake.nix b/flake.nix index 310880c4..1e714364 100644 --- a/flake.nix +++ b/flake.nix @@ -209,49 +209,61 @@ }; }; - outputs = inputs @ { - self, - nixpkgs, - flake-parts, - ... - }: let - lib = import "${nixpkgs}/lib"; - flake = import "${self}/flake.nix"; - in - flake-parts.lib.mkFlake {inherit inputs;} { + outputs = + inputs@{ + self, + nixpkgs, + flake-parts, + ... + }: + let + inherit (nixpkgs) lib; + in + flake-parts.lib.mkFlake { inherit inputs; } { imports = [ + ./checks ./modules ./packages + ./shells ]; - systems = ["x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin"]; - perSystem = { - system, - pkgs, - inputs', - ... 
- }: { - _module.args.pkgs = import nixpkgs { - inherit system; - config.allowUnfree = true; - }; - devShells.default = import ./shells/default.nix {inherit pkgs flake inputs';}; - devShells.ci = import ./shells/ci.nix {inherit pkgs;}; - }; - flake.lib.create = { - rootDir, - machinesDir ? null, - usersDir ? null, - }: { - dirs = { - lib = self + "/lib"; - services = self + "/services"; - modules = self + "/modules"; - machines = rootDir + "/machines"; + systems = [ + "x86_64-linux" + "aarch64-linux" + "x86_64-darwin" + "aarch64-darwin" + ]; + perSystem = + { system, ... }: + { + _module.args.pkgs = import nixpkgs { + inherit system; + config.allowUnfree = true; + }; }; - libs = { - make-config = import ./lib/make-config.nix {inherit lib rootDir machinesDir usersDir;}; - utils = import ./lib {inherit usersDir rootDir machinesDir;}; + flake.lib.create = + { + rootDir, + machinesDir ? null, + usersDir ? null, + }: + { + dirs = { + lib = self + "/lib"; + services = self + "/services"; + modules = self + "/modules"; + machines = rootDir + "/machines"; + }; + libs = { + make-config = import ./lib/make-config.nix { + inherit + lib + rootDir + machinesDir + usersDir + ; + }; + utils = import ./lib { inherit usersDir rootDir machinesDir; }; + }; }; - }; }; } diff --git a/lib/current-flake.nix b/lib/current-flake.nix index 80fe8aa1..39dbcf99 100644 --- a/lib/current-flake.nix +++ b/lib/current-flake.nix @@ -1,12 +1,18 @@ let currentFlake = builtins.fromJSON (builtins.readFile ../flake.lock); - inherit (currentFlake.nodes.nixos-2311.locked) owner repo rev narHash; + inherit (currentFlake.nodes.nixos-2311.locked) + owner + repo + rev + narHash + ; nixpkgs = builtins.fetchTarball { url = "https://github.com/${owner}/${repo}/archive/${rev}.tar.gz"; sha256 = narHash; }; flake = import ../flake.nix; -in { +in +{ inherit currentFlake flake; - lib = (import nixpkgs {system = "x86_64-linux";}).lib; + lib = (import nixpkgs { system = "x86_64-linux"; }).lib; } diff --git a/lib/default.nix b/lib/default.nix index 108559e5..8c4bb7fd 100644 --- a/lib/default.nix +++ b/lib/default.nix @@ -3,81 +3,90 @@ rootDir, machinesDir, ... 
-}: let +}: +let inherit (import ./current-flake.nix) lib; - inherit (builtins) attrValues map filter concatLists length listToAttrs typeOf split tail; + inherit (builtins) + attrValues + map + filter + concatLists + length + listToAttrs + typeOf + split + tail + ; inherit (lib) pipe filterAttrs strings; inherit (strings) concatStringsSep; -in rec { - isSubsetOf = needle: haystack: - length (lib.lists.intersectLists needle haystack) - == length needle; +in +rec { + isSubsetOf = needle: haystack: length (lib.lists.intersectLists needle haystack) == length needle; - haveCommonElements = needle: haystack: - length (lib.lists.intersectLists needle haystack) - > 0; + haveCommonElements = needle: haystack: length (lib.lists.intersectLists needle haystack) > 0; allUsersMembersOfAllGroups = groups: allUsersMembersOfAllGroups' usersInfo groups; - allUsersMembersOfAllGroups' = users: groups: - if groups == [] - then {} + allUsersMembersOfAllGroups' = + users: groups: + if groups == [ ] then + { } else - filterAttrs - (key: value: isSubsetOf groups (value.extraGroups or [])) - users; + filterAttrs (key: value: isSubsetOf groups (value.extraGroups or [ ])) users; allUsersMembersOfAnyGroup = groups: allUsersMembersOfAnyGroup' usersInfo groups; - allUsersMembersOfAnyGroup' = users: groups: - if groups == [] - then {} + allUsersMembersOfAnyGroup' = + users: groups: + if groups == [ ] then + { } else - filterAttrs - (key: value: haveCommonElements groups (value.extraGroups or [])) - users; + filterAttrs (key: value: haveCommonElements groups (value.extraGroups or [ ])) users; - missing = attrs: key: - !(attrs ? "${key}"); + missing = attrs: key: !(attrs ? "${key}"); allAssignedGroups = predefinedGroups: allAssignedGroups' usersInfo predefinedGroups; - allAssignedGroups' = users: predefinedGroups: + allAssignedGroups' = + users: predefinedGroups: pipe users [ attrValues - (map (u: u.extraGroups or [])) + (map (u: u.extraGroups or [ ])) concatLists lib.lists.unique (filter (g: missing predefinedGroups g)) (map (g: { name = g; - value = {}; + value = { }; })) listToAttrs ]; allUserKeysForGroup = groups: allUserKeysForGroup' usersInfo groups; - allUserKeysForGroup' = users: groups: - concatLists - (map (value: value.openssh.authorizedKeys.keys or []) - (attrValues (allUsersMembersOfAnyGroup' users groups))); + allUserKeysForGroup' = + users: groups: + concatLists ( + map (value: value.openssh.authorizedKeys.keys or [ ]) ( + attrValues (allUsersMembersOfAnyGroup' users groups) + ) + ); - zfsFileSystems = datasetList: let - zfsRoot = "zfs_root"; - splitPath = path: filter (x: (typeOf x) == "string") (split "/" path); - pathTail = path: concatStringsSep "/" (tail (splitPath path)); - makeZfs = zfsDataset: { - name = "/" + pathTail zfsDataset; - value = { - device = "${zfsRoot}/${zfsDataset}"; - fsType = "zfs"; - options = ["zfsutil"]; + zfsFileSystems = + datasetList: + let + zfsRoot = "zfs_root"; + splitPath = path: filter (x: (typeOf x) == "string") (split "/" path); + pathTail = path: concatStringsSep "/" (tail (splitPath path)); + makeZfs = zfsDataset: { + name = "/" + pathTail zfsDataset; + value = { + device = "${zfsRoot}/${zfsDataset}"; + fsType = "zfs"; + options = [ "zfsutil" ]; + }; }; - }; - in + in listToAttrs (map makeZfs datasetList); allUsers = builtins.attrNames ( - lib.filterAttrs - (n: v: v == "directory") - (builtins.readDir "${usersDir}") + lib.filterAttrs (n: v: v == "directory") (builtins.readDir "${usersDir}") ); readUserInfo = user: import "${usersDir}/${user}/user-info.nix"; diff --git 
a/lib/import-agenix.nix b/lib/import-agenix.nix index 059bf186..133633e4 100644 --- a/lib/import-agenix.nix +++ b/lib/import-agenix.nix @@ -1,14 +1,17 @@ -moduleName: { +moduleName: +{ config, lib, dirs, ... -}: let +}: +let machineConfigPath = config.mcl.host-info.configPath; secretDir = "${machineConfigPath}/secrets/${moduleName}"; vmSecretDir = "${vmConfig}/secrets/${moduleName}"; secrets = import "${dirs.services}/${moduleName}/agenix.nix"; -in { +in +{ age.secrets = secrets secretDir; virtualisation.vmVariant = { diff --git a/lib/make-config.nix b/lib/make-config.nix index bf902da5..5941613b 100644 --- a/lib/make-config.nix +++ b/lib/make-config.nix @@ -4,18 +4,23 @@ machinesDir, usersDir, ... -}: rec { - getMachines = type: (lib.pipe (builtins.readDir "${machinesDir}/${type}") [ - (lib.filterAttrs (n: v: v == "directory" && !(lib.hasPrefix "_" n))) - builtins.attrNames - ]); +}: +rec { + getMachines = + type: + (lib.pipe (builtins.readDir "${machinesDir}/${type}") [ + (lib.filterAttrs (n: v: v == "directory" && !(lib.hasPrefix "_" n))) + builtins.attrNames + ]); allServers = getMachines "server"; allDesktops = getMachines "desktop"; allMachines = allServers ++ allDesktops; - nixosConfigurations = machines: configurations: + nixosConfigurations = + machines: configurations: (lib.genAttrs machines (configurations false)) - // (lib.mapAttrs' (name: value: lib.nameValuePair "${name}-vm" value) - (lib.genAttrs machines (configurations true))); + // (lib.mapAttrs' (name: value: lib.nameValuePair "${name}-vm" value) ( + lib.genAttrs machines (configurations true) + )); } diff --git a/lib/shard-attrs.nix b/lib/shard-attrs.nix index 8df64e52..d114955f 100644 --- a/lib/shard-attrs.nix +++ b/lib/shard-attrs.nix @@ -1,17 +1,21 @@ lib: { - shardAttrs = attrs: shardSize: let - attrNames = builtins.attrNames attrs; - shardCount = builtins.ceil ((0.0 + (builtins.length attrNames)) / shardSize); - attrNameShards = lib.pipe (lib.range 0 (shardCount - 1)) [ - (builtins.map (i: lib.sublist (i * shardSize) shardSize attrNames)) - ]; - shards = lib.pipe attrNameShards [ - (lib.imap0 (i: shard: { - name = builtins.toString i; - value = lib.genAttrs shard (key: attrs.${key}); - })) - lib.listToAttrs - ]; - in + shardAttrs = + attrs: shardSize: + let + attrNames = builtins.attrNames attrs; + shardCount = builtins.ceil ((0.0 + (builtins.length attrNames)) / shardSize); + attrNameShards = lib.pipe (lib.range 0 (shardCount - 1)) [ + (builtins.map (i: lib.sublist (i * shardSize) shardSize attrNames)) + ]; + shards = lib.pipe attrNameShards [ + (lib.imap0 ( + i: shard: { + name = builtins.toString i; + value = lib.genAttrs shard (key: attrs.${key}); + } + )) + lib.listToAttrs + ]; + in shards; } diff --git a/lib/zfs-disko.nix b/lib/zfs-disko.nix index bd2cf4a4..6e88a8d3 100644 --- a/lib/zfs-disko.nix +++ b/lib/zfs-disko.nix @@ -1,203 +1,209 @@ rec { - makePrimaryZfsDisk = { - disk, - zpoolName, - espSizeGB, - swapSizeGB, - }: { - type = "disk"; - device = disk; - content = { - type = "table"; - format = "gpt"; - partitions = [ - { - name = "ESP"; - start = "0"; - end = "${toString espSizeGB}GiB"; - bootable = true; - fs-type = "fat32"; - content = { - type = "filesystem"; - format = "vfat"; - mountpoint = "/boot"; - }; - } - { - name = "zfs"; - start = "${toString espSizeGB}GiB"; - end = "-${toString swapSizeGB}GiB"; - part-type = "primary"; - content = { - type = "zfs"; - pool = "${zpoolName}"; - }; - } - { - name = "swap"; - start = "-${toString swapSizeGB}GiB"; - end = "100%"; - part-type = "primary"; - 
content = { - type = "swap"; - randomEncryption = true; - }; - } - ]; + makePrimaryZfsDisk = + { + disk, + zpoolName, + espSizeGB, + swapSizeGB, + }: + { + type = "disk"; + device = disk; + content = { + type = "table"; + format = "gpt"; + partitions = [ + { + name = "ESP"; + start = "0"; + end = "${toString espSizeGB}GiB"; + bootable = true; + fs-type = "fat32"; + content = { + type = "filesystem"; + format = "vfat"; + mountpoint = "/boot"; + }; + } + { + name = "zfs"; + start = "${toString espSizeGB}GiB"; + end = "-${toString swapSizeGB}GiB"; + part-type = "primary"; + content = { + type = "zfs"; + pool = "${zpoolName}"; + }; + } + { + name = "swap"; + start = "-${toString swapSizeGB}GiB"; + end = "100%"; + part-type = "primary"; + content = { + type = "swap"; + randomEncryption = true; + }; + } + ]; + }; }; - }; - makeSecondaryZfsDisk = { - disk, - zpoolName, - }: { - type = "disk"; - device = disk; - content = { - type = "table"; - format = "gpt"; - partitions = [ - { - name = "zfs"; - start = "0"; - end = "100%"; - part-type = "primary"; - content = { - type = "zfs"; - pool = "${zpoolName}"; - }; - } - ]; + makeSecondaryZfsDisk = + { + disk, + zpoolName, + }: + { + type = "disk"; + device = disk; + content = { + type = "table"; + format = "gpt"; + partitions = [ + { + name = "zfs"; + start = "0"; + end = "100%"; + part-type = "primary"; + content = { + type = "zfs"; + pool = "${zpoolName}"; + }; + } + ]; + }; }; - }; - makeZpool = { - config, - zpoolName, - }: { - ${zpoolName} = { - type = "zpool"; - rootFsOptions = { - acltype = "posixacl"; - atime = "off"; - canmount = "off"; - checksum = "sha512"; - compression = "lz4"; - xattr = "sa"; - mountpoint = "none"; - "com.sun:auto-snapshot" = "false"; - }; - options = { - autotrim = "on"; - listsnapshots = "on"; - }; + makeZpool = + { + config, + zpoolName, + }: + { + ${zpoolName} = { + type = "zpool"; + rootFsOptions = { + acltype = "posixacl"; + atime = "off"; + canmount = "off"; + checksum = "sha512"; + compression = "lz4"; + xattr = "sa"; + mountpoint = "none"; + "com.sun:auto-snapshot" = "false"; + }; + options = { + autotrim = "on"; + listsnapshots = "on"; + }; - postCreateHook = "zfs snapshot ${zpoolName}@blank"; + postCreateHook = "zfs snapshot ${zpoolName}@blank"; - datasets = { - root = { - mountpoint = "/"; - type = "zfs_fs"; - options = { - "com.sun:auto-snapshot" = "false"; - mountpoint = "legacy"; + datasets = { + root = { + mountpoint = "/"; + type = "zfs_fs"; + options = { + "com.sun:auto-snapshot" = "false"; + mountpoint = "legacy"; + }; }; - }; - "root/nix" = { - mountpoint = "/nix"; - type = "zfs_fs"; - options = { - "com.sun:auto-snapshot" = "false"; - canmount = "on"; - mountpoint = "legacy"; - refreservation = "100GiB"; + "root/nix" = { + mountpoint = "/nix"; + type = "zfs_fs"; + options = { + "com.sun:auto-snapshot" = "false"; + canmount = "on"; + mountpoint = "legacy"; + refreservation = "100GiB"; + }; }; - }; - "root/var" = { - mountpoint = "/var"; - type = "zfs_fs"; - options = { - "com.sun:auto-snapshot" = "true"; - canmount = "on"; - mountpoint = "legacy"; + "root/var" = { + mountpoint = "/var"; + type = "zfs_fs"; + options = { + "com.sun:auto-snapshot" = "true"; + canmount = "on"; + mountpoint = "legacy"; + }; }; - }; - "root/var/lib" = { - mountpoint = "/var/lib"; - type = "zfs_fs"; - options = { - "com.sun:auto-snapshot" = "true"; - canmount = "on"; - mountpoint = "legacy"; + "root/var/lib" = { + mountpoint = "/var/lib"; + type = "zfs_fs"; + options = { + "com.sun:auto-snapshot" = "true"; + canmount = "on"; 
+ mountpoint = "legacy"; + }; }; - }; - "root/home" = { - mountpoint = "/home"; - type = "zfs_fs"; - options = { - "com.sun:auto-snapshot" = "true"; - canmount = "on"; - mountpoint = "legacy"; - refreservation = "200GiB"; + "root/home" = { + mountpoint = "/home"; + type = "zfs_fs"; + options = { + "com.sun:auto-snapshot" = "true"; + canmount = "on"; + mountpoint = "legacy"; + refreservation = "200GiB"; + }; }; - }; - "root/var/lib/docker" = { - mountpoint = "/var/lib/docker"; - type = "zfs_fs"; - options = { - "com.sun:auto-snapshot" = "false"; - canmount = "on"; - mountpoint = "legacy"; - refreservation = "100GiB"; + "root/var/lib/docker" = { + mountpoint = "/var/lib/docker"; + type = "zfs_fs"; + options = { + "com.sun:auto-snapshot" = "false"; + canmount = "on"; + mountpoint = "legacy"; + refreservation = "100GiB"; + }; }; - }; - "root/var/lib/containers" = { - mountpoint = "/var/lib/containers"; - type = "zfs_fs"; - options = { - "com.sun:auto-snapshot" = "false"; - canmount = "on"; - mountpoint = "legacy"; - refreservation = "100GiB"; + "root/var/lib/containers" = { + mountpoint = "/var/lib/containers"; + type = "zfs_fs"; + options = { + "com.sun:auto-snapshot" = "false"; + canmount = "on"; + mountpoint = "legacy"; + refreservation = "100GiB"; + }; }; }; }; }; - }; - makeZfsPartitions = { - disks, - config, - zpoolName ? "zfs_root", - espSizeGB ? 4, - swapSizeGB ? 32, - }: let - first = builtins.head disks; - rest = builtins.tail disks; - secondaryDisks = builtins.listToAttrs ( - builtins.map (disk: { - name = disk; - value = makeSecondaryZfsDisk {inherit disk zpoolName;}; - }) - rest - ); - in { - devices = { - disk = - secondaryDisks - // { + makeZfsPartitions = + { + disks, + config, + zpoolName ? "zfs_root", + espSizeGB ? 4, + swapSizeGB ? 32, + }: + let + first = builtins.head disks; + rest = builtins.tail disks; + secondaryDisks = builtins.listToAttrs ( + builtins.map (disk: { + name = disk; + value = makeSecondaryZfsDisk { inherit disk zpoolName; }; + }) rest + ); + in + { + devices = { + disk = secondaryDisks // { "${first}" = makePrimaryZfsDisk { disk = first; inherit zpoolName espSizeGB swapSizeGB; }; }; - zpool = makeZpool {inherit config zpoolName;}; + zpool = makeZpool { inherit config zpoolName; }; + }; }; - }; } diff --git a/modules/folder-size-metrics/default.nix b/modules/folder-size-metrics/default.nix index cd1238a3..5c40e647 100644 --- a/modules/folder-size-metrics/default.nix +++ b/modules/folder-size-metrics/default.nix @@ -1,52 +1,55 @@ -{withSystem, ...}: { - flake.nixosModules.folder-size-metrics = { - pkgs, - config, - lib, - ... - }: let - cfg = config.services.folder-size-metrics; - package = withSystem pkgs.stdenv.hostPlatform.system ( - {config, ...}: - config.packages.folder-size-metrics - ); - inherit (import ../lib.nix {inherit lib;}) toEnvVariables; - in { - options.services.folder-size-metrics = with lib; { - enable = mkEnableOption (lib.mdDoc "Folder Size Metrics"); - args = { - port = mkOption { - type = types.nullOr types.port; - default = null; - example = 8888; - }; +{ withSystem, ... }: +{ + flake.nixosModules.folder-size-metrics = + { + pkgs, + config, + lib, + ... + }: + let + cfg = config.services.folder-size-metrics; + package = withSystem pkgs.stdenv.hostPlatform.system ( + { config, ... 
}: config.packages.folder-size-metrics + ); + inherit (import ../lib.nix { inherit lib; }) toEnvVariables; + in + { + options.services.folder-size-metrics = with lib; { + enable = mkEnableOption (lib.mdDoc "Folder Size Metrics"); + args = { + port = mkOption { + type = types.nullOr types.port; + default = null; + example = 8888; + }; - base-path = mkOption { - type = types.nullOr types.str; - default = null; - example = "/var/lib"; - }; + base-path = mkOption { + type = types.nullOr types.str; + default = null; + example = "/var/lib"; + }; - interval-sec = mkOption { - type = types.int; - default = 60; + interval-sec = mkOption { + type = types.int; + default = 60; + }; }; }; - }; - config = { - systemd.services.folder-size-metrics = lib.mkIf cfg.enable { - description = "Folder Size Metrics"; + config = { + systemd.services.folder-size-metrics = lib.mkIf cfg.enable { + description = "Folder Size Metrics"; - wantedBy = ["multi-user.target"]; + wantedBy = [ "multi-user.target" ]; - environment = toEnvVariables cfg.args; + environment = toEnvVariables cfg.args; - path = [package]; + path = [ package ]; - serviceConfig = { - ExecStart = "${lib.getExe package}"; + serviceConfig = { + ExecStart = "${lib.getExe package}"; + }; }; }; }; - }; } diff --git a/modules/grafana-agent-flow/default.nix b/modules/grafana-agent-flow/default.nix index 01dfc3a0..888bfa3f 100644 --- a/modules/grafana-agent-flow/default.nix +++ b/modules/grafana-agent-flow/default.nix @@ -1,39 +1,41 @@ -{withSystem, ...}: { - flake.nixosModules.grafana-agent-flow = { - pkgs, - config, - lib, - ... - }: let - cfg = - config.services.grafana-agent-flow; - package = withSystem pkgs.stdenv.hostPlatform.system ( - {config, ...}: - config.packages.grafana-agent - ); - in { - options.services.grafana-agent-flow = with lib; { - enable = mkEnableOption (lib.mdDoc "Grafana Agent (Flow mode)"); - config-file = mkOption { - type = types.str; - default = "./config.river"; - example = "./config.river"; +{ withSystem, ... }: +{ + flake.nixosModules.grafana-agent-flow = + { + pkgs, + config, + lib, + ... + }: + let + cfg = config.services.grafana-agent-flow; + package = withSystem pkgs.stdenv.hostPlatform.system ( + { config, ... }: config.packages.grafana-agent + ); + in + { + options.services.grafana-agent-flow = with lib; { + enable = mkEnableOption (lib.mdDoc "Grafana Agent (Flow mode)"); + config-file = mkOption { + type = types.str; + default = "./config.river"; + example = "./config.river"; + }; }; - }; - config = { - systemd.services.grafana-agent-flow = lib.mkIf cfg.enable { - description = "Grafana Agent (Flow mode)"; + config = { + systemd.services.grafana-agent-flow = lib.mkIf cfg.enable { + description = "Grafana Agent (Flow mode)"; - wantedBy = ["multi-user.target"]; + wantedBy = [ "multi-user.target" ]; - environment = { - AGENT_MODE = "flow"; - }; + environment = { + AGENT_MODE = "flow"; + }; - serviceConfig = { - ExecStart = ''${package}/bin/grafana-agent-flow run ${cfg.config-file}''; + serviceConfig = { + ExecStart = ''${package}/bin/grafana-agent-flow run ${cfg.config-file}''; + }; }; }; }; - }; } diff --git a/modules/host-info.nix b/modules/host-info.nix index 265e4423..c3a3ba47 100644 --- a/modules/host-info.nix +++ b/modules/host-info.nix @@ -2,12 +2,18 @@ config, lib, ... 
-}: { +}: +{ options.mcl.host-info = with lib; { type = mkOption { - type = types.nullOr (types.enum ["desktop" "server"]); + type = types.nullOr ( + types.enum [ + "desktop" + "server" + ] + ); default = null; - example = ["desktop"]; + example = [ "desktop" ]; description = '' Whether this host is a desktop or a server. ''; @@ -16,7 +22,7 @@ isVM = mkOption { type = types.nullOr types.bool; default = null; - example = ["false"]; + example = [ "false" ]; description = '' Whether this configuration is a VM variant. ''; @@ -25,7 +31,7 @@ configPath = mkOption { type = types.nullOr types.string; default = null; - example = ["machines/server/solunska-server"]; + example = [ "machines/server/solunska-server" ]; description = '' The configuration path for this host relative to the repo root. ''; diff --git a/modules/lib.nix b/modules/lib.nix index dfd3d4d9..a021f243 100644 --- a/modules/lib.nix +++ b/modules/lib.nix @@ -1,16 +1,21 @@ -{lib}: let - inherit (lib) pipe mapAttrs mapAttrs' filterAttrs nameValuePair; +{ lib }: +let + inherit (lib) + pipe + mapAttrs + mapAttrs' + filterAttrs + nameValuePair + ; inherit (lib.strings) replaceStrings lowerChars upperChars; -in rec { - nixOptionNameToEnvVarName = str: - replaceStrings (lowerChars ++ ["-"]) (upperChars ++ ["_"]) str; +in +rec { + nixOptionNameToEnvVarName = str: replaceStrings (lowerChars ++ [ "-" ]) (upperChars ++ [ "_" ]) str; - toEnvVariables = args: + toEnvVariables = + args: pipe args [ - (mapAttrs (k: v: - if builtins.isString v - then v - else builtins.toJSON v)) + (mapAttrs (k: v: if builtins.isString v then v else builtins.toJSON v)) (filterAttrs (k: v: (v != "null") && (v != "") && (v != null))) (mapAttrs' (k: v: nameValuePair (nixOptionNameToEnvVarName k) v)) ]; diff --git a/modules/lido/ethereum-validators-monitoring/args.nix b/modules/lido/ethereum-validators-monitoring/args.nix index c6df4176..e77d2f50 100644 --- a/modules/lido/ethereum-validators-monitoring/args.nix +++ b/modules/lido/ethereum-validators-monitoring/args.nix @@ -1,19 +1,36 @@ -lib: -with lib; { +lib: with lib; { log-level = mkOption { - type = types.nullOr (types.enum ["error" "warning" "notice" "info" "debug"]); + type = types.nullOr ( + types.enum [ + "error" + "warning" + "notice" + "info" + "debug" + ] + ); default = null; description = "Application log level."; }; log-format = mkOption { - type = types.nullOr (types.enum ["simple" "json"]); + type = types.nullOr ( + types.enum [ + "simple" + "json" + ] + ); default = null; description = "Application log format."; }; working-mode = mkOption { - type = types.nullOr (types.enum ["finalized" "head"]); + type = types.nullOr ( + types.enum [ + "finalized" + "head" + ] + ); default = null; description = "Application working mode."; }; @@ -58,13 +75,28 @@ with lib; { }; node-env = mkOption { - type = types.nullOr (types.enum ["development" "production" "staging" "testnet" "test"]); + type = types.nullOr ( + types.enum [ + "development" + "production" + "staging" + "testnet" + "test" + ] + ); default = null; description = "Node.js environment."; }; eth-network = mkOption { - type = types.nullOr (types.enum [1 5 17000 1337702]); + type = types.nullOr ( + types.enum [ + 1 + 5 + 17000 + 1337702 + ] + ); description = "Ethereum network ID for connection execution layer RPC."; }; @@ -127,7 +159,13 @@ with lib; { }; validator-registry-source = mkOption { - type = types.nullOr (types.enum ["lido" "keysapi" "file"]); + type = types.nullOr ( + types.enum [ + "lido" + "keysapi" + "file" + ] + ); default = null; description 
= "Validators registry source."; }; diff --git a/modules/lido/ethereum-validators-monitoring/default.nix b/modules/lido/ethereum-validators-monitoring/default.nix index 2f6824a6..19b70711 100644 --- a/modules/lido/ethereum-validators-monitoring/default.nix +++ b/modules/lido/ethereum-validators-monitoring/default.nix @@ -1,138 +1,144 @@ -{withSystem, ...}: { - flake.nixosModules.ethereum-validators-monitoring = { - pkgs, - config, - lib, - ... - }: let - db = config.services.ethereum-validators-monitoring.db; - eachService = config.services.ethereum-validators-monitoring.instances; - inherit (import ../../lib.nix {inherit lib;}) toEnvVariables; +{ withSystem, ... }: +{ + flake.nixosModules.ethereum-validators-monitoring = + { + pkgs, + config, + lib, + ... + }: + let + db = config.services.ethereum-validators-monitoring.db; + eachService = config.services.ethereum-validators-monitoring.instances; + inherit (import ../../lib.nix { inherit lib; }) toEnvVariables; - args = import ./args.nix lib; + args = import ./args.nix lib; - monitoringOptions = with lib; { - options = { - enable = mkEnableOption (lib.mdDoc "Ethereum Validators Monitoring"); - inherit args; - }; - }; - in { - options.services.ethereum-validators-monitoring = with lib; { - instances = mkOption { - type = types.attrsOf (types.submodule monitoringOptions); - default = {}; - description = mdDoc "Specification of one or Ethereum Validators Monitoring instances."; + monitoringOptions = with lib; { + options = { + enable = mkEnableOption (lib.mdDoc "Ethereum Validators Monitoring"); + inherit args; + }; }; - - db = { - host = mkOption { - type = types.str; - description = "Clickhouse server host."; + in + { + options.services.ethereum-validators-monitoring = with lib; { + instances = mkOption { + type = types.attrsOf (types.submodule monitoringOptions); + default = { }; + description = mdDoc "Specification of one or Ethereum Validators Monitoring instances."; }; - user = mkOption { - type = types.str; - description = "Clickhouse server user."; - }; + db = { + host = mkOption { + type = types.str; + description = "Clickhouse server host."; + }; - password-file = mkOption { - type = types.path; - description = "Clickhouse server password file."; - }; + user = mkOption { + type = types.str; + description = "Clickhouse server user."; + }; - name = mkOption { - type = types.str; - description = "Clickhouse server DB name."; - }; + password-file = mkOption { + type = types.path; + description = "Clickhouse server password file."; + }; + + name = mkOption { + type = types.str; + description = "Clickhouse server DB name."; + }; - port = mkOption { - type = types.port; - default = 8123; - description = "Clickhouse server port."; + port = mkOption { + type = types.port; + default = 8123; + description = "Clickhouse server port."; + }; }; }; - }; - config = lib.mkIf (eachService != {}) { - systemd.services.ethereum-validators-monitoring-preStart = { - after = ["network.target"]; - wantedBy = ["multi-user.target"]; - script = '' - umask 177 - mkdir -p /var/lib/ethereum-validators-monitoring - echo DB_PASSWORD="$(cat ${db.password-file})" > /var/lib/ethereum-validators-monitoring/env - echo CLICKHOUSE_PASSWORD="$(cat ${db.password-file})" > /var/lib/ethereum-validators-monitoring/clickhouse-env - ''; - }; + config = lib.mkIf (eachService != { }) { + systemd.services.ethereum-validators-monitoring-preStart = { + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + script = '' + umask 177 + mkdir -p 
/var/lib/ethereum-validators-monitoring + echo DB_PASSWORD="$(cat ${db.password-file})" > /var/lib/ethereum-validators-monitoring/env + echo CLICKHOUSE_PASSWORD="$(cat ${db.password-file})" > /var/lib/ethereum-validators-monitoring/clickhouse-env + ''; + }; - virtualisation.oci-containers = { - backend = "docker"; - containers = - (lib.mapAttrs' - ( - name: let + virtualisation.oci-containers = { + backend = "docker"; + containers = + (lib.mapAttrs' ( + name: + let serviceName = "ethereum-validators-monitoring-${name}"; in - cfg: - lib.nameValuePair serviceName (lib.mkIf cfg.enable { - image = "lidofinance/ethereum-validators-monitoring:4.5.1"; - environment = - (toEnvVariables cfg.args) - // { - DB_HOST = db.host; - DB_USER = db.user; - DB_NAME = db.name; - DB_PORT = toString db.port; - }; - environmentFiles = ["/var/lib/ethereum-validators-monitoring/env"]; - ports = ["${toString cfg.args.external-http-port}:${toString cfg.args.external-http-port}"]; - dependsOn = ["clickhouse-server"]; - extraOptions = [ - "--network=host" - ]; - }) - ) - eachService) - // { - clickhouse-server = { - image = "yandex/clickhouse-server"; - environment = { - CLICKHOUSE_USER = db.user; - CLICKHOUSE_DB = db.name; + cfg: + lib.nameValuePair serviceName ( + lib.mkIf cfg.enable { + image = "lidofinance/ethereum-validators-monitoring:4.5.1"; + environment = (toEnvVariables cfg.args) // { + DB_HOST = db.host; + DB_USER = db.user; + DB_NAME = db.name; + DB_PORT = toString db.port; + }; + environmentFiles = [ "/var/lib/ethereum-validators-monitoring/env" ]; + ports = [ "${toString cfg.args.external-http-port}:${toString cfg.args.external-http-port}" ]; + dependsOn = [ "clickhouse-server" ]; + extraOptions = [ + "--network=host" + ]; + } + ) + ) eachService) + // { + clickhouse-server = { + image = "yandex/clickhouse-server"; + environment = { + CLICKHOUSE_USER = db.user; + CLICKHOUSE_DB = db.name; + }; + environmentFiles = [ "/var/lib/ethereum-validators-monitoring/clickhouse-env" ]; + ports = [ "${toString db.port}:${toString db.port}" ]; + volumes = [ "./.volumes/clickhouse:/var/lib/clickhouse" ]; + extraOptions = [ + "--network=host" + ]; }; - environmentFiles = ["/var/lib/ethereum-validators-monitoring/clickhouse-env"]; - ports = ["${toString db.port}:${toString db.port}"]; - volumes = ["./.volumes/clickhouse:/var/lib/clickhouse"]; - extraOptions = [ - "--network=host" - ]; - }; - clickhouse-client = { - image = "yandex/clickhouse-client"; - entrypoint = "/usr/bin/env"; - cmd = ["sleep" "infinity"]; - extraOptions = [ - "--network=host" - ]; - }; + clickhouse-client = { + image = "yandex/clickhouse-client"; + entrypoint = "/usr/bin/env"; + cmd = [ + "sleep" + "infinity" + ]; + extraOptions = [ + "--network=host" + ]; + }; - cadvisor = { - image = "zcube/cadvisor:latest"; - ports = ["8080:8080"]; - volumes = [ - "/:/rootfs:ro" - "/var/run:/var/run:rw" - "/sys:/sys:ro" - "/var/lib/docker/:/var/lib/docker:ro" - ]; - extraOptions = [ - "--network=host" - ]; + cadvisor = { + image = "zcube/cadvisor:latest"; + ports = [ "8080:8080" ]; + volumes = [ + "/:/rootfs:ro" + "/var/run:/var/run:rw" + "/sys:/sys:ro" + "/var/lib/docker/:/var/lib/docker:ro" + ]; + extraOptions = [ + "--network=host" + ]; + }; }; - }; + }; }; }; - }; } diff --git a/modules/lido/keys-api/default.nix b/modules/lido/keys-api/default.nix index e87a5387..3b8b405d 100644 --- a/modules/lido/keys-api/default.nix +++ b/modules/lido/keys-api/default.nix @@ -1,203 +1,207 @@ -{withSystem, ...}: { - flake.nixosModules.lido-keys-api = { - pkgs, - config, - 
lib, - ... - }: let - cfg = config.services.lido-keys-api; - inherit (import ../../lib.nix {inherit lib;}) toEnvVariables; - in { - options.services.lido-keys-api = with lib; { - enable = mkEnableOption (lib.mdDoc "Lido Keys API"); - args = { - port = mkOption { - type = types.nullOr types.port; - default = null; - example = 3000; - }; +{ withSystem, ... }: +{ + flake.nixosModules.lido-keys-api = + { + pkgs, + config, + lib, + ... + }: + let + cfg = config.services.lido-keys-api; + inherit (import ../../lib.nix { inherit lib; }) toEnvVariables; + in + { + options.services.lido-keys-api = with lib; { + enable = mkEnableOption (lib.mdDoc "Lido Keys API"); + args = { + port = mkOption { + type = types.nullOr types.port; + default = null; + example = 3000; + }; - cors-whitelist-regexp = mkOption { - type = types.nullOr types.str; - default = null; - example = "^https?://(?:.+?\.)?(?:lido|testnet|mainnet)\.fi$"; - }; + cors-whitelist-regexp = mkOption { + type = types.nullOr types.str; + default = null; + example = "^https?://(?:.+?\.)?(?:lido|testnet|mainnet)\.fi$"; + }; - global-throttle-ttl = mkOption { - type = types.nullOr types.int; - default = null; - example = 5; - description = '' - The number of seconds that each request will last in storage - ''; - }; + global-throttle-ttl = mkOption { + type = types.nullOr types.int; + default = null; + example = 5; + description = '' + The number of seconds that each request will last in storage + ''; + }; - global-throttle-limit = mkOption { - type = types.nullOr types.int; - default = null; - example = 100; - description = '' - The maximum number of requests within the TTL limit - ''; - }; + global-throttle-limit = mkOption { + type = types.nullOr types.int; + default = null; + example = 100; + description = '' + The maximum number of requests within the TTL limit + ''; + }; - global-cache-ttl = mkOption { - type = types.nullOr types.int; - default = null; - example = 1; - description = '' - Cache expiration time in seconds - ''; - }; + global-cache-ttl = mkOption { + type = types.nullOr types.int; + default = null; + example = 1; + description = '' + Cache expiration time in seconds + ''; + }; - sentry-dsn = mkOption { - type = types.nullOr types.str; - default = null; - example = ""; - }; + sentry-dsn = mkOption { + type = types.nullOr types.str; + default = null; + example = ""; + }; - log-level = mkOption { - type = types.nullOr types.str; - default = null; - example = "debug"; - description = '' - Log level: debug, info, notice, warning or error - ''; - }; + log-level = mkOption { + type = types.nullOr types.str; + default = null; + example = "debug"; + description = '' + Log level: debug, info, notice, warning or error + ''; + }; - log-format = mkOption { - type = types.nullOr types.str; - default = null; - example = "json"; - description = '' - Log format: simple or json - ''; - }; + log-format = mkOption { + type = types.nullOr types.str; + default = null; + example = "json"; + description = '' + Log format: simple or json + ''; + }; - providers-urls = mkOption { - type = types.nullOr types.str; - default = null; - example = "https://mainnet.infura.io/v3/XXX,https://eth-mainnet.alchemyapi.io/v2/YYY"; - }; + providers-urls = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://mainnet.infura.io/v3/XXX,https://eth-mainnet.alchemyapi.io/v2/YYY"; + }; - chronix-provider-mainnet-url = mkOption { - type = types.nullOr types.str; - default = null; - example = 
"https://mainnet.infura.io/v3/XXX,https://eth-mainnet.alchemyapi.io/v2/YYY"; - description = '' - provider url for e2e tests - ''; - }; + chronix-provider-mainnet-url = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://mainnet.infura.io/v3/XXX,https://eth-mainnet.alchemyapi.io/v2/YYY"; + description = '' + provider url for e2e tests + ''; + }; - chronix-provider-goerli-url = mkOption { - type = types.nullOr types.str; - default = null; - example = "https://goerli.infura.io/v3/XXX,https://eth-goerli.alchemyapi.io/v2/YYY"; - description = '' - provider url for e2e tests - ''; - }; + chronix-provider-goerli-url = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://goerli.infura.io/v3/XXX,https://eth-goerli.alchemyapi.io/v2/YYY"; + description = '' + provider url for e2e tests + ''; + }; - chain-id = mkOption { - type = types.nullOr types.int; - default = null; - example = 1; - description = '' - chain id - for mainnet 1 - for testnet 5 - ''; - }; - db-name = mkOption { - type = types.nullOr types.str; - default = null; - example = "node_operator_keys_service_db"; - }; + chain-id = mkOption { + type = types.nullOr types.int; + default = null; + example = 1; + description = '' + chain id + for mainnet 1 + for testnet 5 + ''; + }; + db-name = mkOption { + type = types.nullOr types.str; + default = null; + example = "node_operator_keys_service_db"; + }; - db-port = mkOption { - type = types.nullOr types.port; - default = null; - example = 5432; - }; + db-port = mkOption { + type = types.nullOr types.port; + default = null; + example = 5432; + }; - db-host = mkOption { - type = types.nullOr types.str; - default = null; - example = "localhost"; - }; + db-host = mkOption { + type = types.nullOr types.str; + default = null; + example = "localhost"; + }; - db-user = mkOption { - type = types.nullOr types.str; - default = null; - example = "postgres"; - }; + db-user = mkOption { + type = types.nullOr types.str; + default = null; + example = "postgres"; + }; - db-password = mkOption { - type = types.nullOr types.str; - default = null; - example = "postgres"; - }; + db-password = mkOption { + type = types.nullOr types.str; + default = null; + example = "postgres"; + }; - provider-json-rpc-max-batch-size = mkOption { - type = types.nullOr types.int; - default = null; - example = 100; - }; + provider-json-rpc-max-batch-size = mkOption { + type = types.nullOr types.int; + default = null; + example = 100; + }; - provider-concurrent-requests = mkOption { - type = types.nullOr types.int; - default = null; - example = 5; - }; + provider-concurrent-requests = mkOption { + type = types.nullOr types.int; + default = null; + example = 5; + }; - provider-batch-aggregation-wait-ms = mkOption { - type = types.nullOr types.int; - default = null; - example = 10; - }; + provider-batch-aggregation-wait-ms = mkOption { + type = types.nullOr types.int; + default = null; + example = 10; + }; - cl-api-urls = mkOption { - type = types.nullOr types.str; - default = null; - example = "https://quiknode.pro/"; - }; + cl-api-urls = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://quiknode.pro/"; + }; - validator-registry-enable = mkOption { - type = types.nullOr types.bool; - default = null; - example = true; + validator-registry-enable = mkOption { + type = types.nullOr types.bool; + default = null; + example = true; + }; }; }; - }; - config = { - virtualisation.oci-containers = lib.mkIf cfg.enable { - backend = "docker"; - containers 
= { - lido-keys-api = { - image = "lidofinance/lido-keys-api:1.0.2"; - environment = toEnvVariables cfg.args; - ports = ["${toString cfg.args.port}:${toString cfg.args.port}"]; - dependsOn = ["postgresql-lido"]; - extraOptions = [ - "--network=host" - ]; - }; - - postgresql-lido = { - image = "postgres:16-alpine"; - environment = { - POSTGRES_DB = "${cfg.args.db-name}"; - POSTGRES_USER = "${cfg.args.db-user}"; - POSTGRES_PASSWORD = "${cfg.args.db-password}"; + config = { + virtualisation.oci-containers = lib.mkIf cfg.enable { + backend = "docker"; + containers = { + lido-keys-api = { + image = "lidofinance/lido-keys-api:1.0.2"; + environment = toEnvVariables cfg.args; + ports = [ "${toString cfg.args.port}:${toString cfg.args.port}" ]; + dependsOn = [ "postgresql-lido" ]; + extraOptions = [ + "--network=host" + ]; + }; + + postgresql-lido = { + image = "postgres:16-alpine"; + environment = { + POSTGRES_DB = "${cfg.args.db-name}"; + POSTGRES_USER = "${cfg.args.db-user}"; + POSTGRES_PASSWORD = "${cfg.args.db-password}"; + }; + ports = [ "${toString cfg.args.db-port}:${toString cfg.args.db-port}" ]; + extraOptions = [ + "--network=host" + ]; }; - ports = ["${toString cfg.args.db-port}:${toString cfg.args.db-port}"]; - extraOptions = [ - "--network=host" - ]; }; }; }; }; - }; } diff --git a/modules/lido/validator-ejector/default.nix b/modules/lido/validator-ejector/default.nix index 570dbf86..15650d5c 100644 --- a/modules/lido/validator-ejector/default.nix +++ b/modules/lido/validator-ejector/default.nix @@ -1,210 +1,213 @@ -{withSystem, ...}: { - flake.nixosModules.lido-validator-ejector = { - pkgs, - config, - lib, - ... - }: let - cfg = config.services.lido-validator-ejector; - package = withSystem pkgs.stdenv.hostPlatform.system ( - {config, ...}: - config.packages.validator-ejector - ); - inherit (import ../../lib.nix {inherit lib;}) toEnvVariables; - in { - options.services.lido-validator-ejector = with lib; { - enable = mkEnableOption (lib.mdDoc "Lido Validator Ejector"); - - args = { - execution-node = mkOption { - type = types.str; - example = "http://1.2.3.4:8545"; - description = '' - Ethereum Execution Node endpoint. - ''; - }; - - consensus-node = mkOption { - type = types.str; - example = "http://1.2.3.4:5051"; - description = '' - Ethereum Consensus Node endpoint. - ''; - }; - - locator-address = mkOption { - type = types.str; - example = "0x123"; - description = '' - Address of the Locator contract Goerli / Mainnet. - ''; - }; - - staking-module-id = mkOption { - type = types.int; - example = 123; - description = '' - Staking Module ID for which operator ID is set, currently only one exists - (NodeOperatorsRegistry) with id 1. - ''; - }; - - operator-id = mkOption { - type = types.int; - example = 123; - description = '' - Operator ID in the Node Operators registry, easiest to get from Operators UI: Goerli/Mainnet. - ''; - }; - - messages-location = mkOption { - type = types.nullOr types.str; - default = null; - example = "messages"; - description = '' - Local folder or external storage bucket url to load json exit message files from. Required if you are using exit messages mode. - ''; - }; - - validator-exit-webhook = mkOption { - type = types.nullOr types.str; - default = null; - example = "http://webhook"; - description = '' - POST validator info to an endpoint instead of sending out an exit message in order to initiate an exit. Required if you are using webhook mode. 
- ''; - }; - - oracle-addresses-allowlist = mkOption { - type = types.listOf types.str; - example = ["0x123"]; - description = '' - Allowed Oracle addresses to accept transactions from Goerli / Mainnet. - ''; - }; - - messages-password = mkOption { - type = types.nullOr types.str; - default = null; - example = "password"; - description = '' - Password to decrypt encrypted exit messages with. Needed only if you encrypt your exit messages. - ''; - }; - - messages-password-file = mkOption { - type = types.nullOr types.str; - default = null; - example = "password_inside.txt"; - description = '' - Path to a file with password inside to decrypt exit messages with. - Needed only if you have encrypted exit messages. If used, MESSAGES_PASSWORD - (not MESSAGES_PASSWORD_FILE) needs to be added to LOGGER_SECRETS in order to be sanitized. - ''; - }; - - blocks-preload = mkOption { - type = types.nullOr types.int; - default = null; - example = 50000; - description = '' - Amount of blocks to load events from on start. Increase if daemon was not running for some time. Defaults to a week of blocks. - ''; - }; - - blocks-loop = mkOption { - type = types.nullOr types.int; - default = null; - example = 900; - description = '' - Amount of blocks to load events from on every poll. Defaults to 3 hours of blocks. - ''; - }; - - job-interval = mkOption { - type = types.nullOr types.int; - default = null; - example = 384000; - description = '' - Time interval in milliseconds to run checks. Defaults to time of 1 epoch - ''; - }; - - http-port = mkOption { - type = types.nullOr types.port; - default = null; - example = 8989; - description = '' - Port to serve metrics and health check on. - ''; - }; - - run-metrics = mkOption { - type = types.nullOr types.bool; - default = null; - example = false; - description = '' - Enable metrics endpoint. - ''; - }; - - run-health-check = mkOption { - type = types.nullOr types.bool; - default = null; - example = true; - description = '' - Enable health check endpoint - ''; - }; - - logger-level = mkOption { - type = types.nullOr types.str; - default = null; - example = "info"; - description = '' - Severity level from which to start showing errors eg info will hide debug messages - ''; - }; - - logger-format = mkOption { - type = types.nullOr types.str; - default = null; - example = "simple"; - description = '' - Simple or JSON log output: simple/json - ''; - }; - - logger-secrets = mkOption { - type = types.listOf types.str; - default = []; - example = ["MESSAGES_PASSWORD"]; - description = '' - JSON string array of either env var keys to sanitize in logs or exact values - ''; - }; - - dry-run = mkOption { - type = types.nullOr types.bool; - default = null; - example = false; - description = '' - Run the service without actually sending out exit messages - ''; +{ withSystem, ... }: +{ + flake.nixosModules.lido-validator-ejector = + { + pkgs, + config, + lib, + ... + }: + let + cfg = config.services.lido-validator-ejector; + package = withSystem pkgs.stdenv.hostPlatform.system ( + { config, ... }: config.packages.validator-ejector + ); + inherit (import ../../lib.nix { inherit lib; }) toEnvVariables; + in + { + options.services.lido-validator-ejector = with lib; { + enable = mkEnableOption (lib.mdDoc "Lido Validator Ejector"); + + args = { + execution-node = mkOption { + type = types.str; + example = "http://1.2.3.4:8545"; + description = '' + Ethereum Execution Node endpoint. 
+ ''; + }; + + consensus-node = mkOption { + type = types.str; + example = "http://1.2.3.4:5051"; + description = '' + Ethereum Consensus Node endpoint. + ''; + }; + + locator-address = mkOption { + type = types.str; + example = "0x123"; + description = '' + Address of the Locator contract Goerli / Mainnet. + ''; + }; + + staking-module-id = mkOption { + type = types.int; + example = 123; + description = '' + Staking Module ID for which operator ID is set, currently only one exists - (NodeOperatorsRegistry) with id 1. + ''; + }; + + operator-id = mkOption { + type = types.int; + example = 123; + description = '' + Operator ID in the Node Operators registry, easiest to get from Operators UI: Goerli/Mainnet. + ''; + }; + + messages-location = mkOption { + type = types.nullOr types.str; + default = null; + example = "messages"; + description = '' + Local folder or external storage bucket url to load json exit message files from. Required if you are using exit messages mode. + ''; + }; + + validator-exit-webhook = mkOption { + type = types.nullOr types.str; + default = null; + example = "http://webhook"; + description = '' + POST validator info to an endpoint instead of sending out an exit message in order to initiate an exit. Required if you are using webhook mode. + ''; + }; + + oracle-addresses-allowlist = mkOption { + type = types.listOf types.str; + example = [ "0x123" ]; + description = '' + Allowed Oracle addresses to accept transactions from Goerli / Mainnet. + ''; + }; + + messages-password = mkOption { + type = types.nullOr types.str; + default = null; + example = "password"; + description = '' + Password to decrypt encrypted exit messages with. Needed only if you encrypt your exit messages. + ''; + }; + + messages-password-file = mkOption { + type = types.nullOr types.str; + default = null; + example = "password_inside.txt"; + description = '' + Path to a file with password inside to decrypt exit messages with. + Needed only if you have encrypted exit messages. If used, MESSAGES_PASSWORD + (not MESSAGES_PASSWORD_FILE) needs to be added to LOGGER_SECRETS in order to be sanitized. + ''; + }; + + blocks-preload = mkOption { + type = types.nullOr types.int; + default = null; + example = 50000; + description = '' + Amount of blocks to load events from on start. Increase if daemon was not running for some time. Defaults to a week of blocks. + ''; + }; + + blocks-loop = mkOption { + type = types.nullOr types.int; + default = null; + example = 900; + description = '' + Amount of blocks to load events from on every poll. Defaults to 3 hours of blocks. + ''; + }; + + job-interval = mkOption { + type = types.nullOr types.int; + default = null; + example = 384000; + description = '' + Time interval in milliseconds to run checks. Defaults to time of 1 epoch + ''; + }; + + http-port = mkOption { + type = types.nullOr types.port; + default = null; + example = 8989; + description = '' + Port to serve metrics and health check on. + ''; + }; + + run-metrics = mkOption { + type = types.nullOr types.bool; + default = null; + example = false; + description = '' + Enable metrics endpoint. 
+ ''; + }; + + run-health-check = mkOption { + type = types.nullOr types.bool; + default = null; + example = true; + description = '' + Enable health check endpoint + ''; + }; + + logger-level = mkOption { + type = types.nullOr types.str; + default = null; + example = "info"; + description = '' + Severity level from which to start showing errors eg info will hide debug messages + ''; + }; + + logger-format = mkOption { + type = types.nullOr types.str; + default = null; + example = "simple"; + description = '' + Simple or JSON log output: simple/json + ''; + }; + + logger-secrets = mkOption { + type = types.listOf types.str; + default = [ ]; + example = [ "MESSAGES_PASSWORD" ]; + description = '' + JSON string array of either env var keys to sanitize in logs or exact values + ''; + }; + + dry-run = mkOption { + type = types.nullOr types.bool; + default = null; + example = false; + description = '' + Run the service without actually sending out exit messages + ''; + }; }; }; - }; - config = { - virtualisation.oci-containers = lib.mkIf cfg.enable { - containers.lido-validator-ejector = { - image = "lidofinance/validator-ejector:1.6.0"; - environment = toEnvVariables cfg.args; - extraOptions = [ - "--network=host" - ]; - volumes = ["/ethereum/lido/withdrawal-automation:/ethereum/lido/withdrawal-automation"]; + config = { + virtualisation.oci-containers = lib.mkIf cfg.enable { + containers.lido-validator-ejector = { + image = "lidofinance/validator-ejector:1.6.0"; + environment = toEnvVariables cfg.args; + extraOptions = [ + "--network=host" + ]; + volumes = [ "/ethereum/lido/withdrawal-automation:/ethereum/lido/withdrawal-automation" ]; + }; }; }; }; - }; } diff --git a/modules/lido/withdrawals-automation/default.nix b/modules/lido/withdrawals-automation/default.nix index 9dab2cd9..15e7cf0b 100644 --- a/modules/lido/withdrawals-automation/default.nix +++ b/modules/lido/withdrawals-automation/default.nix @@ -1,96 +1,107 @@ -{withSystem, ...}: { - flake.nixosModules.lido-withdrawals-automation = { - pkgs, - config, - lib, - ... 
- }: let - cfg = config.services.lido-withdrawals-automation; - package = withSystem pkgs.stdenv.hostPlatform.system ( - {config, ...}: - config.packages.lido-withdrawals-automation - ); - inherit (import ../../lib.nix {inherit lib;}) toEnvVariables; - in { - options.services.lido-withdrawals-automation = with lib; { - enable = mkEnableOption (lib.mdDoc "Lido Withdrawals Automation"); - args = { - percentage = mkOption { - type = types.nullOr types.int; - default = null; - example = 10; - }; - kapi-url = mkOption { - type = types.nullOr types.str; - default = null; - example = "https://example.com/kapi"; - }; - remote-signer-url = mkOption { - type = types.nullOr types.str; - default = null; - example = "https://remotesigner.local:8080"; - }; - keymanager-urls = mkOption { - type = types.nullOr types.str; - default = null; - example = "https://example.com/, https://example2.com/"; - }; - password = mkOption { - type = types.str; - example = "mysecretpassword"; - }; - output-folder = mkOption { - type = types.str; - example = "/path/to/your/output-folder"; - }; - operator-id = mkOption { - type = types.int; - example = 123; - }; - beacon-node-url = mkOption { - type = types.nullOr types.str; - default = null; - example = "http://localhost:5052"; - }; - module-id = mkOption { - type = types.nullOr types.int; - default = null; - example = 1; - }; - keymanager-token-file = mkOption { - type = types.nullOr types.path; - default = null; - example = ./token; - }; - overwrite = mkOption { - type = types.nullOr (types.enum ["always" "never" "prompt"]); - default = null; - example = "always"; +{ withSystem, ... }: +{ + flake.nixosModules.lido-withdrawals-automation = + { + pkgs, + config, + lib, + ... + }: + let + cfg = config.services.lido-withdrawals-automation; + package = withSystem pkgs.stdenv.hostPlatform.system ( + { config, ... 
}: config.packages.lido-withdrawals-automation + ); + inherit (import ../../lib.nix { inherit lib; }) toEnvVariables; + in + { + options.services.lido-withdrawals-automation = with lib; { + enable = mkEnableOption (lib.mdDoc "Lido Withdrawals Automation"); + args = { + percentage = mkOption { + type = types.nullOr types.int; + default = null; + example = 10; + }; + kapi-url = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://example.com/kapi"; + }; + remote-signer-url = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://remotesigner.local:8080"; + }; + keymanager-urls = mkOption { + type = types.nullOr types.str; + default = null; + example = "https://example.com/, https://example2.com/"; + }; + password = mkOption { + type = types.str; + example = "mysecretpassword"; + }; + output-folder = mkOption { + type = types.str; + example = "/path/to/your/output-folder"; + }; + operator-id = mkOption { + type = types.int; + example = 123; + }; + beacon-node-url = mkOption { + type = types.nullOr types.str; + default = null; + example = "http://localhost:5052"; + }; + module-id = mkOption { + type = types.nullOr types.int; + default = null; + example = 1; + }; + keymanager-token-file = mkOption { + type = types.nullOr types.path; + default = null; + example = ./token; + }; + overwrite = mkOption { + type = types.nullOr ( + types.enum [ + "always" + "never" + "prompt" + ] + ); + default = null; + example = "always"; + }; }; }; - }; - config = { - systemd.services.lido-withdrawals-automation = lib.mkIf cfg.enable { - description = "Lido Withdrawals Automation"; + config = { + systemd.services.lido-withdrawals-automation = lib.mkIf cfg.enable { + description = "Lido Withdrawals Automation"; - wantedBy = ["multi-user.target"]; + wantedBy = [ "multi-user.target" ]; - environment = toEnvVariables cfg.args; + environment = toEnvVariables cfg.args; - path = [package]; + path = [ package ]; - serviceConfig = lib.mkMerge [ - { - Group = "lido"; - ExecStart = lib.getExe (pkgs.writeShellApplication { - name = "repl"; - text = '' - ${lib.getExe package} - ''; - }); - } - ]; + serviceConfig = lib.mkMerge [ + { + Group = "lido"; + ExecStart = lib.getExe ( + pkgs.writeShellApplication { + name = "repl"; + text = '' + ${lib.getExe package} + ''; + } + ); + } + ]; + }; }; }; - }; } diff --git a/modules/pyroscope/default.nix b/modules/pyroscope/default.nix index 19f97b2a..3302e87c 100644 --- a/modules/pyroscope/default.nix +++ b/modules/pyroscope/default.nix @@ -1,28 +1,28 @@ -{withSystem, ...}: { - flake.nixosModules.pyroscope = { - pkgs, - config, - lib, - ... - }: let - cfg = - config.services.pyroscope; - package = withSystem pkgs.stdenv.hostPlatform.system ( - {config, ...}: - config.packages.pyroscope - ); - in { - options.services.pyroscope = with lib; { - enable = mkEnableOption (lib.mdDoc "Grafana Agent (Flow mode)"); - }; - config = { - systemd.services.pyroscope = lib.mkIf cfg.enable { - description = "Pyroscope"; - wantedBy = ["multi-user.target"]; - serviceConfig = { - ExecStart = ''${lib.getExe package}''; +{ withSystem, ... }: +{ + flake.nixosModules.pyroscope = + { + pkgs, + config, + lib, + ... + }: + let + cfg = config.services.pyroscope; + package = withSystem pkgs.stdenv.hostPlatform.system ({ config, ... 
}: config.packages.pyroscope); + in + { + options.services.pyroscope = with lib; { + enable = mkEnableOption (lib.mdDoc "Grafana Agent (Flow mode)"); + }; + config = { + systemd.services.pyroscope = lib.mkIf cfg.enable { + description = "Pyroscope"; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + ExecStart = ''${lib.getExe package}''; + }; }; }; }; - }; } diff --git a/modules/shard-split/default.nix b/modules/shard-split/default.nix index d308c420..dd60a112 100644 --- a/modules/shard-split/default.nix +++ b/modules/shard-split/default.nix @@ -1,34 +1,45 @@ -{lib, ...}: let +{ lib, ... }: +let inherit (import ../../lib/shard-attrs.nix lib) shardAttrs; -in { - flake.flakeModules.shardSplit = {config, ...}: let - cfg = config.mcl.matrix.shard; - in { - options.mcl.matrix.shard = with lib; { - size = mkOption { - type = types.numbers.positive; - default = 1; - description = "Number of shards to use for parallel builds"; - }; - attributePath = mkOption { - type = types.listOf types.str; - default = ["legacyPackages" "checks"]; - description = "The attribute path to split into shards"; +in +{ + flake.flakeModules.shardSplit = + { config, ... }: + let + cfg = config.mcl.matrix.shard; + in + { + options.mcl.matrix.shard = with lib; { + size = mkOption { + type = types.numbers.positive; + default = 1; + description = "Number of shards to use for parallel builds"; + }; + attributePath = mkOption { + type = types.listOf types.str; + default = [ + "legacyPackages" + "checks" + ]; + description = "The attribute path to split into shards"; + }; }; - }; - config = { - perSystem = {self', ...}: let - inherit (cfg) size attributePath; - attrs = lib.attrByPath attributePath {} self'; - shards = shardAttrs attrs size; - in { - legacyPackages.mcl.matrix = { - shardCount = builtins.length (builtins.attrValues shards); - shardSize = cfg.size; - inherit shards attributePath; - }; + config = { + perSystem = + { self', ... }: + let + inherit (cfg) size attributePath; + attrs = lib.attrByPath attributePath { } self'; + shards = shardAttrs attrs size; + in + { + legacyPackages.mcl.matrix = { + shardCount = builtins.length (builtins.attrValues shards); + shardSize = cfg.size; + inherit shards attributePath; + }; + }; }; }; - }; } diff --git a/modules/users.nix b/modules/users.nix index 556e53ca..43e5844c 100644 --- a/modules/users.nix +++ b/modules/users.nix @@ -2,19 +2,22 @@ usersDir, rootDir, machinesDir, -}: { +}: +{ config, lib, ... 
-}: let +}: +let cfg = config.users; - enabled = cfg.includedUsers != [] || cfg.includedGroups != []; + enabled = cfg.includedUsers != [ ] || cfg.includedGroups != [ ]; - utils = import ../lib {inherit usersDir rootDir machinesDir;}; + utils = import ../lib { inherit usersDir rootDir machinesDir; }; allUsers = utils.usersInfo; - allGroups = let - predefinedGroups = config.ids.gids; - in + allGroups = + let + predefinedGroups = config.ids.gids; + in utils.allAssignedGroups' allUsers predefinedGroups; allUserNames = builtins.attrNames allUsers; allGroupNames = builtins.attrNames allGroups; @@ -23,29 +26,38 @@ (lib.getAttrs cfg.includedUsers allUsers) // (utils.allUsersMembersOfAnyGroup' allUsers cfg.includedGroups); - powerUserSystemGroups = ["docker" "podman" "lxd" "plugdev" "libvirtd" "vboxusers"]; + powerUserSystemGroups = [ + "docker" + "podman" + "lxd" + "plugdev" + "libvirtd" + "vboxusers" + ]; - selectedUsers = - builtins.mapAttrs - ( - user: userConfig: - userConfig - // { - extraGroups = (userConfig.extraGroups or []) ++ powerUserSystemGroups; - } - ) - selectedUsers'; + selectedUsers = builtins.mapAttrs ( + user: userConfig: + userConfig + // { + extraGroups = (userConfig.extraGroups or [ ]) ++ powerUserSystemGroups; + } + ) selectedUsers'; - selectedGroups = let - predefinedGroups = config.ids.gids; - in + selectedGroups = + let + predefinedGroups = config.ids.gids; + in utils.allAssignedGroups' selectedUsers predefinedGroups; -in { +in +{ options.users = with lib; { includedUsers = mkOption { type = types.listOf (types.enum allUserNames); - default = []; - example = ["zahary" "johnny"]; + default = [ ]; + example = [ + "zahary" + "johnny" + ]; description = '' List of MetaCraft Labs users to be included in the system. ''; @@ -53,8 +65,11 @@ in { includedGroups = mkOption { type = types.listOf (types.enum allGroupNames); - default = []; - example = ["devops" "dendreth"]; + default = [ ]; + example = [ + "devops" + "dendreth" + ]; description = '' List of groups of MetaCraft Labs users to be included in the system. ''; diff --git a/packages/default.nix b/packages/default.nix index cc249985..41543953 100644 --- a/packages/default.nix +++ b/packages/default.nix @@ -1,89 +1,76 @@ -{lib, ...}: { - perSystem = { - inputs', - pkgs, - ... - }: let - inherit (lib) optionalAttrs versionAtLeast; - inherit (pkgs) system; - inherit (pkgs.hostPlatform) isLinux; - in rec { - legacyPackages = { - inputs = { - nixpkgs = rec { - inherit (pkgs) cachix; - nix = let - nixStable = pkgs.nixVersions.stable; - in - assert versionAtLeast nixStable.version "2.24.10"; nixStable; - nix-eval-jobs = pkgs.nix-eval-jobs.override {inherit nix;}; - nix-fast-build = pkgs.nix-fast-build.override {inherit nix-eval-jobs;}; +{ lib, ... }: +{ + perSystem = + { + inputs', + pkgs, + ... 
+ }: + let + inherit (lib) optionalAttrs versionAtLeast; + inherit (pkgs) system; + inherit (pkgs.hostPlatform) isLinux; + in + rec { + legacyPackages = { + inputs = { + nixpkgs = rec { + inherit (pkgs) cachix; + nix = + let + nixStable = pkgs.nixVersions.stable; + in + assert versionAtLeast nixStable.version "2.24.10"; + nixStable; + nix-eval-jobs = pkgs.nix-eval-jobs.override { inherit nix; }; + nix-fast-build = pkgs.nix-fast-build.override { inherit nix-eval-jobs; }; + }; + agenix = inputs'.agenix.packages; + devenv = inputs'.devenv.packages; + disko = inputs'.disko.packages; + dlang-nix = inputs'.dlang-nix.packages; + ethereum-nix = inputs'.ethereum-nix.packages; + fenix = inputs'.fenix.packages; + git-hooks-nix = inputs'.git-hooks-nix.packages; + microvm = inputs'.microvm.packages; + nix-fast-build = inputs'.nix-fast-build.packages; + nixos-anywhere = inputs'.nixos-anywhere.packages; + terranix = inputs'.terranix.packages; + treefmt-nix = inputs'.treefmt-nix.packages; }; - agenix = inputs'.agenix.packages; - devenv = inputs'.devenv.packages; - disko = inputs'.disko.packages; - dlang-nix = inputs'.dlang-nix.packages; - ethereum-nix = inputs'.ethereum-nix.packages; - fenix = inputs'.fenix.packages; - git-hooks-nix = inputs'.git-hooks-nix.packages; - microvm = inputs'.microvm.packages; - nix-fast-build = inputs'.nix-fast-build.packages; - nixos-anywhere = inputs'.nixos-anywhere.packages; - terranix = inputs'.terranix.packages; - treefmt-nix = inputs'.treefmt-nix.packages; - }; - rustToolchain = with inputs'.fenix.packages; - with latest; - combine [ - cargo - clippy - rust-analyzer - rust-src - rustc - rustfmt - targets.wasm32-wasi.latest.rust-std - ]; - }; + rustToolchain = + with inputs'.fenix.packages; + with latest; + combine [ + cargo + clippy + rust-analyzer + rust-src + rustc + rustfmt + targets.wasm32-wasi.latest.rust-std + ]; + }; - packages = - { - lido-withdrawals-automation = pkgs.callPackage ./lido-withdrawals-automation {}; - pyroscope = pkgs.callPackage ./pyroscope {}; - } - // optionalAttrs (system == "x86_64-linux" || system == "aarch64-darwin") { - grafana-agent = import ./grafana-agent {inherit inputs';}; - } - // optionalAttrs isLinux { - folder-size-metrics = pkgs.callPackage ./folder-size-metrics {}; - } - // optionalAttrs (system == "x86_64-linux") { - mcl = pkgs.callPackage ./mcl { - buildDubPackage = inputs'.dlang-nix.legacyPackages.buildDubPackage.override { - ldc = inputs'.dlang-nix.packages."ldc-binary-1_34_0"; + packages = + { + lido-withdrawals-automation = pkgs.callPackage ./lido-withdrawals-automation { }; + pyroscope = pkgs.callPackage ./pyroscope { }; + } + // optionalAttrs (system == "x86_64-linux" || system == "aarch64-darwin") { + grafana-agent = import ./grafana-agent { inherit inputs'; }; + } + // optionalAttrs isLinux { + folder-size-metrics = pkgs.callPackage ./folder-size-metrics { }; + } + // optionalAttrs (system == "x86_64-linux") { + mcl = pkgs.callPackage ./mcl { + buildDubPackage = inputs'.dlang-nix.legacyPackages.buildDubPackage.override { + ldc = inputs'.dlang-nix.packages."ldc-binary-1_34_0"; + }; + inherit (legacyPackages.inputs.nixpkgs) cachix nix nix-eval-jobs; }; - inherit (legacyPackages.inputs.nixpkgs) cachix nix nix-eval-jobs; }; - }; - checks = - packages - // { - inherit (legacyPackages) rustToolchain; - inherit (legacyPackages.inputs.dlang-nix) dub; - inherit (legacyPackages.inputs.nixpkgs) cachix nix nix-eval-jobs nix-fast-build; - inherit (legacyPackages.inputs.ethereum-nix) foundry; - } - // optionalAttrs (system == 
"x86_64-linux" || system == "aarch64-darwin") { - inherit (legacyPackages.inputs.ethereum-nix) geth; - } - // optionalAttrs isLinux { - inherit (inputs'.validator-ejector.packages) validator-ejector; - } - // optionalAttrs (system == "x86_64-linux") { - inherit (pkgs) terraform; - inherit (legacyPackages.inputs.terranix) terranix; - inherit (legacyPackages.inputs.dlang-nix) dcd dscanner serve-d dmd ldc; - inherit (legacyPackages.inputs.ethereum-nix) mev-boost nethermind web3signer nimbus-eth2; - }; - }; + }; } diff --git a/packages/folder-size-metrics/default.nix b/packages/folder-size-metrics/default.nix index ce935e54..b657a958 100644 --- a/packages/folder-size-metrics/default.nix +++ b/packages/folder-size-metrics/default.nix @@ -1,7 +1,6 @@ -{pkgs, ...}: +{ pkgs, ... }: pkgs.writers.writePython3Bin "folder-size-metrics.py" { libraries = [ pkgs.python3Packages.prometheus-client ]; -} -./src/app.py +} ./src/app.py diff --git a/packages/grafana-agent/default.nix b/packages/grafana-agent/default.nix index 385f5edb..7d7a84d3 100644 --- a/packages/grafana-agent/default.nix +++ b/packages/grafana-agent/default.nix @@ -1,2 +1,4 @@ -{inputs'}: -inputs'.nixpkgs-unstable.legacyPackages.grafana-agent.overrideAttrs (old: {subPackages = old.subPackages ++ ["cmd/grafana-agent-flow"];}) +{ inputs' }: +inputs'.nixpkgs-unstable.legacyPackages.grafana-agent.overrideAttrs (old: { + subPackages = old.subPackages ++ [ "cmd/grafana-agent-flow" ]; +}) diff --git a/packages/mcl/default.nix b/packages/mcl/default.nix index f7ea7ac5..f99c12b9 100644 --- a/packages/mcl/default.nix +++ b/packages/mcl/default.nix @@ -5,7 +5,8 @@ nix, nix-eval-jobs, ... -}: let +}: +let deps = [ nix @@ -32,31 +33,40 @@ ] ); in - buildDubPackage rec { - pname = "mcl"; - version = "unstable"; - src = lib.fileset.toSource { - root = ./.; - fileset = - lib.fileset.fileFilter - (file: builtins.any file.hasExt ["d" "sdl" "json" "nix"]) - ./.; - }; +buildDubPackage rec { + pname = "mcl"; + version = "unstable"; + src = lib.fileset.toSource { + root = ./.; + fileset = lib.fileset.fileFilter ( + file: + builtins.any file.hasExt [ + "d" + "sdl" + "json" + "nix" + ] + ) ./.; + }; - nativeBuildInputs = [pkgs.makeWrapper] ++ deps; + nativeBuildInputs = [ pkgs.makeWrapper ] ++ deps; - postFixup = '' - wrapProgram $out/bin/${pname} --set PATH "${lib.makeBinPath deps}" - ''; + postFixup = '' + wrapProgram $out/bin/${pname} --set PATH "${lib.makeBinPath deps}" + ''; - dubBuildFlags = ["--compiler=dmd" "-b" "debug"]; + dubBuildFlags = [ + "--compiler=dmd" + "-b" + "debug" + ]; - dubTestFlags = [ - "--compiler=dmd" - "--" - "-e" - excludedTests - ]; + dubTestFlags = [ + "--compiler=dmd" + "--" + "-e" + excludedTests + ]; - meta.mainProgram = pname; - } + meta.mainProgram = pname; +} diff --git a/packages/mcl/src/src/mcl/commands/ci_matrix.d b/packages/mcl/src/src/mcl/commands/ci_matrix.d index a8c0d1f2..e02c329f 100755 --- a/packages/mcl/src/src/mcl/commands/ci_matrix.d +++ b/packages/mcl/src/src/mcl/commands/ci_matrix.d @@ -266,24 +266,24 @@ unittest { { auto testJSON = `{ - "attr": "home/bean-desktop", - "attrPath": [ "home/bean-desktop" ], - "cacheStatus": "notBuilt", - "drvPath": "/nix/store/jp7qgm9mgikksypzljrbhmxa31xmmq1x-home-manager-generation.drv", - "inputDrvs": { - "/nix/store/0hkqmn0z40yx89kd5wgfjxzqckvjkiw3-home-manager-files.drv": [ "out" ], - "/nix/store/0khqc4m8jrv5gkg2jwf5xz46bkmz2qxl-dconf-keys.json.drv": [ "out" ], - "/nix/store/5rydfkrpd5vdpz4qxsypivxwy9y6z8gl-bash-5.2p26.drv": [ "out" ], - 
"/nix/store/7vgw0fqilqwa9l26arqpym1l4iisgff1-stdenv-linux.drv": [ "out" ], - "/nix/store/96ji6f4cijfc23jz98x45xm1dvzz5hq8-activation-script.drv": [ "out" ], - "/nix/store/m60vlf9j0g8y82avg5x90nbg554wshva-home-manager-path.drv": [ "out" ] - }, - "isCached": false, - "name": "home-manager-generation", - "outputs": { - "out": "/nix/store/30qrziyj0vbg6n43bbh08ql0xbnsy76d-home-manager-generation" - }, - "system": "x86_64-linux" + "attr": "home/bean-desktop", + "attrPath": [ "home/bean-desktop" ], + "cacheStatus": "notBuilt", + "drvPath": "/nix/store/jp7qgm9mgikksypzljrbhmxa31xmmq1x-home-manager-generation.drv", + "inputDrvs": { + "/nix/store/0hkqmn0z40yx89kd5wgfjxzqckvjkiw3-home-manager-files.drv": [ "out" ], + "/nix/store/0khqc4m8jrv5gkg2jwf5xz46bkmz2qxl-dconf-keys.json.drv": [ "out" ], + "/nix/store/5rydfkrpd5vdpz4qxsypivxwy9y6z8gl-bash-5.2p26.drv": [ "out" ], + "/nix/store/7vgw0fqilqwa9l26arqpym1l4iisgff1-stdenv-linux.drv": [ "out" ], + "/nix/store/96ji6f4cijfc23jz98x45xm1dvzz5hq8-activation-script.drv": [ "out" ], + "/nix/store/m60vlf9j0g8y82avg5x90nbg554wshva-home-manager-path.drv": [ "out" ] + }, + "isCached": false, + "name": "home-manager-generation", + "outputs": { + "out": "/nix/store/30qrziyj0vbg6n43bbh08ql0xbnsy76d-home-manager-generation" + }, + "system": "x86_64-linux" }`.parseJSON; auto testPackage = testJSON.packageFromNixEvalJobsJson( diff --git a/packages/mcl/src/src/mcl/commands/host_info.d b/packages/mcl/src/src/mcl/commands/host_info.d index 66bce461..1e45ff40 100644 --- a/packages/mcl/src/src/mcl/commands/host_info.d +++ b/packages/mcl/src/src/mcl/commands/host_info.d @@ -657,9 +657,9 @@ MachineConfigInfo getMachineConfigInfo() if (_module != "" && ( // Mass-storage controller. Definitely important. _class.startsWith("0x01") || - //Firewire controller. A disk might be attached. + //Firewire controller. A disk might be attached. _class.startsWith("0x0c00") || - //USB controller. Needed if we want to use the + //USB controller. Needed if we want to use the // keyboard when things go wrong in the initrd. 
_class.startsWith("0x0c03"))) { diff --git a/packages/mcl/src/src/mcl/utils/test/eval.json b/packages/mcl/src/src/mcl/utils/test/eval.json index 7c07f315..20c28be6 100644 --- a/packages/mcl/src/src/mcl/utils/test/eval.json +++ b/packages/mcl/src/src/mcl/utils/test/eval.json @@ -30,4 +30,4 @@ "category": [] } ] -} \ No newline at end of file +} diff --git a/packages/mcl/src/src/mcl/utils/test/eval.nix b/packages/mcl/src/src/mcl/utils/test/eval.nix index 9f1bdff6..08a62777 100644 --- a/packages/mcl/src/src/mcl/utils/test/eval.nix +++ b/packages/mcl/src/src/mcl/utils/test/eval.nix @@ -3,28 +3,30 @@ books = [ { author = "Bilbo/Frodo/Sam"; - category = []; + category = [ ]; title = "The Red Book of Westmarch"; } { author = "Meriadoc Brandybuck, Master of Buckland"; - category = []; + category = [ ]; title = "Herblore of the Shire "; } { author = "Meriadoc Brandybuck, Master of Buckland"; - category = ["Scholarly"]; + category = [ "Scholarly" ]; title = "Reckoning of Years"; } { - category = []; + category = [ ]; title = "The Tale of Years"; } { author = "Ori"; - category = []; + category = [ ]; title = "The Book of Mazarbul"; } ]; - sources = ["https://middle-earth.xenite.org/what-are-the-various-books-named-in-the-lord-of-the-rings/"]; + sources = [ + "https://middle-earth.xenite.org/what-are-the-various-books-named-in-the-lord-of-the-rings/" + ]; } diff --git a/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-no-shards/flake.nix b/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-no-shards/flake.nix index 14739010..e2f7b544 100644 --- a/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-no-shards/flake.nix +++ b/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-no-shards/flake.nix @@ -1,7 +1,3 @@ { - outputs = - { ... }: - { - - }; + outputs = { ... }: { }; } diff --git a/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-ok/flake.nix b/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-ok/flake.nix index 23d7b944..ec0fadea 100644 --- a/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-ok/flake.nix +++ b/packages/mcl/src/src/mcl/utils/test/nix/shard-matrix-ok/flake.nix @@ -5,33 +5,43 @@ flake-parts.follows = "nixos-modules/flake-parts"; }; - outputs = inputs @ { - flake-parts, - nixos-modules, - ... - }: - flake-parts.lib.mkFlake {inherit inputs;} { - systems = ["x86_64-linux"]; - imports = [nixos-modules.flakeModules.shardSplit]; + outputs = + inputs@{ + flake-parts, + nixos-modules, + ... + }: + flake-parts.lib.mkFlake { inherit inputs; } { + systems = [ "x86_64-linux" ]; + imports = [ nixos-modules.flakeModules.shardSplit ]; mcl.matrix.shard = { size = 10; - attributePath = ["legacyPackages" "ci-checks"]; - }; - - perSystem = { - pkgs, - lib, - ... - }: { - legacyPackages.ci-checks = lib.pipe (lib.range 0 100) [ - (map builtins.toString) - (x: - lib.genAttrs x (i: - pkgs.runCommandLocal "test-${i}" {} '' - echo 'The answer is ${i}!' > $out - '')) + attributePath = [ + "legacyPackages" + "ci-checks" ]; }; + + perSystem = + { + pkgs, + lib, + ... + }: + { + legacyPackages.ci-checks = lib.pipe (lib.range 0 100) [ + (map builtins.toString) + ( + x: + lib.genAttrs x ( + i: + pkgs.runCommandLocal "test-${i}" { } '' + echo 'The answer is ${i}!' 
> $out + '' + ) + ) + ]; + }; }; } diff --git a/packages/mcl/src/src/mcl/utils/test/test.nix b/packages/mcl/src/src/mcl/utils/test/test.nix index 4b18771b..e9917afc 100644 --- a/packages/mcl/src/src/mcl/utils/test/test.nix +++ b/packages/mcl/src/src/mcl/utils/test/test.nix @@ -1,2 +1 @@ -with import {}; - writeShellScriptBin "helloWorld" "echo Hello World" +with import { }; writeShellScriptBin "helloWorld" "echo Hello World" diff --git a/packages/pyroscope/default.nix b/packages/pyroscope/default.nix index 306054ad..d30f80d1 100644 --- a/packages/pyroscope/default.nix +++ b/packages/pyroscope/default.nix @@ -21,10 +21,16 @@ buildGoModule rec { # export GOWORK=off # ''; - buildInputs = with pkgs; [gcc pkg-config]; + buildInputs = with pkgs; [ + gcc + pkg-config + ]; nativeBuildInputs = buildInputs; - subPackages = ["cmd/pyroscope" "cmd/profilecli"]; + subPackages = [ + "cmd/pyroscope" + "cmd/profilecli" + ]; ldflags = [ "-X=github.com/grafana/pyroscope/pkg/util/build.Branch=${src.rev}" @@ -38,7 +44,7 @@ buildGoModule rec { homepage = "https://github.com/grafana/pyroscope"; changelog = "https://github.com/grafana/pyroscope/blob/${src.rev}/CHANGELOG.md"; license = licenses.agpl3Only; - maintainers = with maintainers; []; + maintainers = with maintainers; [ ]; mainProgram = "pyroscope"; }; } diff --git a/shells/ci.nix b/shells/ci.nix deleted file mode 100644 index 1a6f9976..00000000 --- a/shells/ci.nix +++ /dev/null @@ -1,8 +0,0 @@ -{pkgs, ...}: -pkgs.mkShellNoCC { - packages = with pkgs; [ - just - jq - nix-eval-jobs - ]; -} diff --git a/shells/default.nix b/shells/default.nix index 92346449..adb62e86 100644 --- a/shells/default.nix +++ b/shells/default.nix @@ -1,35 +1,54 @@ +{ inputs, ... }: { - pkgs, - flake, - inputs', - ... -}: let - repl = pkgs.writeShellApplication { - name = "repl"; - text = '' - nix repl --file "$REPO_ROOT/repl.nix"; - ''; - }; -in - pkgs.mkShell { - packages = with pkgs; [ - inputs'.agenix.packages.agenix - inputs'.nixos-anywhere.packages.nixos-anywhere - figlet - just - jq - nix-eval-jobs - nixos-rebuild - nix-output-monitor - repl - rage - inputs'.dlang-nix.packages.dmd - inputs'.dlang-nix.packages.dub - act - ]; + imports = [ + (import ../checks/pre-commit.nix { + inherit inputs; + }).flake.flakeModules.git-hooks + ]; - shellHook = '' - export REPO_ROOT="$PWD" - figlet -t "${flake.description}" - ''; - } + perSystem = + { + pkgs, + inputs', + config, + ... + }: + { + devShells.default = + let + repl = pkgs.writeShellApplication { + name = "repl"; + text = '' + nix repl --file "$REPO_ROOT/repl.nix"; + ''; + }; + in + pkgs.mkShell { + packages = + with pkgs; + [ + inputs'.agenix.packages.agenix + inputs'.nixos-anywhere.packages.nixos-anywhere + figlet + just + jq + nix-eval-jobs + nixos-rebuild + nix-output-monitor + repl + rage + inputs'.dlang-nix.packages.dub + ] + ++ pkgs.lib.optionals (pkgs.stdenv.system == "x86_64-linux") [ + inputs'.dlang-nix.packages.dmd + ]; + + shellHook = + '' + export REPO_ROOT="$PWD" + figlet -t "Metacraft Nixos Modules" + '' + + config.pre-commit.installationScript; + }; + }; +} diff --git a/statix.toml b/statix.toml new file mode 100644 index 00000000..c6fd6470 --- /dev/null +++ b/statix.toml @@ -0,0 +1,3 @@ +disabled = [ + "empty_pattern" +]
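
For context on how the reformatted lido-validator-ejector module above is consumed, here is a minimal sketch of a downstream NixOS configuration. It assumes this repository is pulled in as a flake input named nixos-modules and that inputs is made available to the module (for example via specialArgs); the endpoints, addresses and IDs simply mirror the option examples shown above and are illustrative only, not recommended production values.

  # Hypothetical consumer configuration (sketch only).
  { inputs, ... }:
  {
    imports = [ inputs.nixos-modules.nixosModules.lido-validator-ejector ];

    services.lido-validator-ejector = {
      enable = true;
      args = {
        # Illustrative values, copied from the option examples above:
        execution-node = "http://1.2.3.4:8545";
        consensus-node = "http://1.2.3.4:5051";
        locator-address = "0x123";
        # The options above note that only one staking module currently exists (id 1):
        staking-module-id = 1;
        operator-id = 123;
        oracle-addresses-allowlist = [ "0x123" ];
        # Matches the volume mounted by the container definition above:
        messages-location = "/ethereum/lido/withdrawal-automation";
      };
    };
  }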
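
Similarly, the reworked modules/users.nix is a function over directory paths that returns a NixOS module, so it has to be instantiated before it can be imported. Below is a minimal sketch under assumed paths (users/, machines/ and the repository root are hypothetical); the includedUsers and includedGroups values are taken from the option examples above.

  # Sketch only; the directory layout is an assumption, not part of this change.
  {
    imports = [
      (import ./modules/users.nix {
        usersDir = ./users;        # hypothetical
        rootDir = ./.;             # hypothetical
        machinesDir = ./machines;  # hypothetical
      })
    ];

    # Values mirror the option examples defined in modules/users.nix:
    users.includedUsers = [ "zahary" ];
    users.includedGroups = [ "devops" ];
  }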